diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 48efec453f5..e25f0b64ce9 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -131,8 +131,10 @@ The end of support height is calculated from the current blockchain height: ## Test the Pre-Release -- [ ] Wait until the [Docker binaries have been built on `main`](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-integration-docker.yml), and the quick tests have passed. -- [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-delivery.yml) +- [ ] Wait until the Docker binaries have been built on `main`, and the quick tests have passed: + - [ ] [ci-unit-tests-docker.yml](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-unit-tests-docker.yml?query=branch%3Amain) + - [ ] [ci-integration-tests-gcp.yml](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml?query=branch%3Amain) +- [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/cd-deploy-nodes-gcp.yml?query=event%3Arelease) ## Publish Release @@ -148,7 +150,7 @@ The end of support height is calculated from the current blockchain height: and put the output in a comment on the PR. ## Publish Docker Images -- [ ] Wait for the [the Docker images to be published successfully](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml). +- [ ] Wait for the [the Docker images to be published successfully](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml?query=event%3Arelease). - [ ] Un-freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. - [ ] Remove `do-not-merge` from the PRs you added it to diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index e6d94a61736..f8ab545c658 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,34 +1,55 @@ ## Motivation - +_What are the most important goals of the ticket or PR?_ + + +### PR Author Checklist + +#### Check before marking the PR as ready for review: + - [ ] Will the PR name make sense to users? + - [ ] Does the PR have a priority label? + - [ ] Have you added or updated tests? + - [ ] Is the documentation up to date? + +##### For significant changes: + - [ ] Is there a summary in the CHANGELOG? + - [ ] Can these changes be split into multiple PRs? + +_If a checkbox isn't relevant to the PR, mark it as done._ ### Specifications + ### Complex Code or Requirements + ## Solution + +### Testing + + + + ## Review + ### Reviewer Checklist - - [ ] Will the PR name make sense to users? - - [ ] Does it need extra CHANGELOG info? (new features, breaking changes, large changes) - - [ ] Are the PR labels correct? - - [ ] Does the code do what the ticket and PR says? - - [ ] Does it change concurrent code, unsafe code, or consensus rules? - - [ ] How do you know it works? Does it have tests? +Check before approving the PR: + - [ ] Does the PR scope match the ticket? + - [ ] Are there enough tests to make sure it works? Do the tests cover the PR motivation? + - [ ] Are all the PR blockers dealt with? + PR blockers can be dealt with in new tickets or PRs. 
+ +_And check the PR Author checklist is complete._ ## Follow Up Work diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index bf6b9cb5842..3c41c296d07 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -148,7 +148,7 @@ jobs: with: test_id: 'custom-conf' docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - grep_patterns: '-e "v1.0.0-rc.2.toml"' + grep_patterns: '-e "loaded zebrad config.*config_path.*=.*v1.0.0-rc.2.toml"' test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' network: ${{ inputs.network || vars.ZCASH_NETWORK }} diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index 1a0d44362e5..0146b4f75fa 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v39.2.4 + uses: tj-actions/changed-files@v40.0.2 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v39.2.4 + uses: tj-actions/changed-files@v40.0.2 with: files: | .github/workflows/*.yml diff --git a/.github/workflows/ci-unit-tests-docker.patch.yml b/.github/workflows/ci-unit-tests-docker.patch.yml index 3ddb5f51694..c9c69768f54 100644 --- a/.github/workflows/ci-unit-tests-docker.patch.yml +++ b/.github/workflows/ci-unit-tests-docker.patch.yml @@ -26,13 +26,6 @@ on: - '.github/workflows/sub-build-docker-image.yml' jobs: - # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) - get-available-disks: - name: Check if cached state disks exist for Mainnet / Check if cached state disks exist - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - build: name: Build CI Docker / Build images runs-on: ubuntu-latest diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index 5f90fbf2184..73834aa649f 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -237,7 +237,7 @@ jobs: with: test_id: 'custom-conf' docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} - grep_patterns: '-e "v1.0.0-rc.2.toml"' + grep_patterns: '-e "loaded zebrad config.*config_path.*=.*v1.0.0-rc.2.toml"' test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' network: ${{ inputs.network || vars.ZCASH_NETWORK }} diff --git a/.github/workflows/ci-unit-tests-os.yml b/.github/workflows/ci-unit-tests-os.yml index c433bf82163..4075cbd20fb 100644 --- a/.github/workflows/ci-unit-tests-os.yml +++ b/.github/workflows/ci-unit-tests-os.yml @@ -51,7 +51,6 @@ on: - '**/clippy.toml' # workflow definitions - '.github/workflows/ci-unit-tests-os.yml' - - '.github/workflows/sub-build-docker-image.yml' env: CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} diff --git a/.github/workflows/docs-deploy-firebase.patch.yml b/.github/workflows/docs-deploy-firebase.patch.yml index 04b4bdc46d4..4f6a0c2db8f 100644 --- a/.github/workflows/docs-deploy-firebase.patch.yml +++ b/.github/workflows/docs-deploy-firebase.patch.yml @@ -27,13 +27,6 @@ jobs: steps: - run: 'echo "No build required"' - build-docs-external: - name: Build and Deploy Zebra External Docs - timeout-minutes: 45 - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - build-docs-internal: 
name: Build and Deploy Zebra Internal Docs timeout-minutes: 45 diff --git a/.github/workflows/docs-deploy-firebase.yml b/.github/workflows/docs-deploy-firebase.yml index 8e886ac4f2c..eadfcfd440e 100644 --- a/.github/workflows/docs-deploy-firebase.yml +++ b/.github/workflows/docs-deploy-firebase.yml @@ -114,62 +114,6 @@ jobs: projectId: ${{ vars.GCP_FIREBASE_PROJECT }} target: docs-book - build-docs-external: - name: Build and Deploy Zebra External Docs - timeout-minutes: 45 - runs-on: ubuntu-latest - permissions: - checks: write - contents: 'read' - id-token: 'write' - pull-requests: write - steps: - - name: Checkout the source code - uses: actions/checkout@v4.1.1 - with: - persist-credentials: false - - - name: Install last version of Protoc - uses: arduino/setup-protoc@v2.1.0 - with: - version: '23.x' - repo-token: ${{ secrets.GITHUB_TOKEN }} - - # Setup Rust with beta toolchain and default profile (to include rust-docs) - - name: Setup Rust - run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=beta --profile=default - - - uses: Swatinem/rust-cache@v2.7.1 - - - name: Build external docs - run: | - # Exclude zebra-utils and zebra-test, they are not for library or app users - cargo doc --no-deps --workspace --all-features --exclude zebra-utils --exclude zebra-test --target-dir "$(pwd)"/target/external - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - id: auth - uses: google-github-actions/auth@v1.1.1 - with: - retries: '3' - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_FIREBASE_SA }}' - - # TODO: remove this step after issue https://github.com/FirebaseExtended/action-hosting-deploy/issues/174 is fixed - - name: Add $GCP_FIREBASE_SA_PATH to env - run: | - # shellcheck disable=SC2002 - echo "GCP_FIREBASE_SA_PATH=$(cat ${{ steps.auth.outputs.credentials_file_path }} | tr -d '\n')" >> "$GITHUB_ENV" - - - name: Deploy external docs to firebase - uses: FirebaseExtended/action-hosting-deploy@v0.7.1 - with: - firebaseServiceAccount: ${{ env.GCP_FIREBASE_SA_PATH }} - channelId: ${{ env.FIREBASE_CHANNEL }} - target: docs-external - projectId: ${{ vars.GCP_FIREBASE_PROJECT }} - build-docs-internal: name: Build and Deploy Zebra Internal Docs timeout-minutes: 45 diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index f0b60acbad5..977be67d4e8 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -18,29 +18,34 @@ jobs: # Each time this workflow is executed, a build will be triggered to create a new image # with the corresponding tags using information from git - # The image will be named `zebra:` - build: - name: Build Release Docker + # The image will be named `zebra:.experimental` + build-experimental: + name: Build Experimental Features Release Docker uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime image_name: zebra - features: ${{ vars.RUST_PROD_FEATURES }} + tag_suffix: .experimental + features: ${{ format('{0} {1}', vars.RUST_PROD_FEATURES, vars.RUST_EXPERIMENTAL_FEATURES) }} rust_log: ${{ vars.RUST_LOG }} # This step needs access to Docker Hub secrets to run successfully secrets: inherit - # The image will be named `zebra:.experimental` - build-experimental: - name: Build Experimental Features Release Docker + # The image will be named `zebra:` + # It should be built last, so tags with the same name point to the production build, not the 
experimental build. + build: + name: Build Release Docker + # Run this build last, regardless of whether experimental worked + needs: build-experimental + if: always() uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime image_name: zebra - tag_suffix: .experimental - features: ${{ format('{0} {1}', vars.RUST_PROD_FEATURES, vars.RUST_EXPERIMENTAL_FEATURES) }} + latest_tag: true + features: ${{ vars.RUST_PROD_FEATURES }} rust_log: ${{ vars.RUST_LOG }} # This step needs access to Docker Hub secrets to run successfully secrets: inherit diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 1b929324619..c1d0c0df698 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -33,6 +33,10 @@ on: test_features: required: false type: string + latest_tag: + required: false + type: boolean + default: false tag_suffix: required: false type: string @@ -88,19 +92,34 @@ jobs: # appends inputs.tag_suffix to image tags/names flavor: | suffix=${{ inputs.tag_suffix }} + latest=${{ inputs.latest_tag }} # generate Docker tags based on the following events/attributes tags: | - type=schedule - # semver and ref,tag automatically add a "latest" tag, but only on stable releases + # These DockerHub release tags support the following use cases: + # - `latest`: always use the latest Zebra release when you pull or update + # - `1`: use the latest Zebra release, but require manual intervention for the next network upgrade + # - `1.x`: update to bug fix releases, but don't add any new features or incompatibilities + # - `v1.x.y` or `1.x.y`: always use the exact version, don't automatically upgrade + # - `sha-zzzzzz`: always use the exact commit (the same as `1.x.y`, but also used in CI and production image tests) + # + # Stopping publishing some tags is a silently breaking change: + # - `1`: doesn't get expected new consensus-compatible releases or bug fixes + # - `1.x`: doesn't get expected bug fixes + # + # `semver` adds a "latest" tag if `inputs.latest_tag` is `true`. type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} type=semver,pattern={{major}} type=ref,event=tag - type=ref,event=branch - type=ref,event=pr + # DockerHub release and CI tags. + # This tag makes sure tests are using exactly the right image, even when multiple PRs run at the same time. type=sha - # edge is the latest commit on the default branch. + # These CI-only tags support CI on PRs, the main branch, and scheduled full syncs. + # These tags do not appear on DockerHub, because DockerHub images are only published on the release event. + type=ref,event=pr + type=ref,event=branch type=edge,enable={{is_default_branch}} + type=schedule # Setup Docker Buildx to allow use of docker cache layers from GH - name: Set up Docker Buildx diff --git a/CHANGELOG.md b/CHANGELOG.md index beda30e185b..0b1544f4737 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,15 +5,65 @@ All notable changes to Zebra are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). -## [Zebra 1.4.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.4.0) - TODO: DATE +## [Zebra 1.4.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.4.0) - 2023-11-07 -Zebra's mining RPCs are now available in release builds. 
Our Docker images are significantly smaller, -because the smaller Zcash verification parameters are now built into the `zebrad` binary. -TODO: rest of intro +Zebra's mining RPCs are now available in release builds. Our Docker images are significantly +smaller, because the smaller Zcash verification parameters are now built into the `zebrad` binary. +Zebra has updated to the shared Rust dependencies from the `zcashd` 5.7.0 release. -This release contains the following changes: +Zebra recovers better from brief network interruptions, and avoids some network and verification +denial of service and performance issues. We have restored our macOS tests in CI, and now support +macOS on a best-effort basis. -### Mining RPCs in Production Builds +We have changed our documentation website URL, and we are considering deprecating some Docker image +tags in release 1.5.0 and later. + +### Deprecation Warnings + +This release has the following deprecation warnings: + +#### Warning: Deprecation of DockerHub Image Tags in a future release + +Zebra currently publishes 11 [DockerHub tags](https://hub.docker.com/r/zfnd/zebra/tags) for each new release. +We want to reduce the number of DockerHub tags we publish in a future minor Zebra release. + +Based on usage and user feedback, we could stop publishing: +- The `1` tag, which updates each release until NU6 +- The `1.x` tag, which updates each patch release until the next minor release +- The `1.x.y` tag, which is the same as `v1.x.y` +- The `sha-xxxxxxx` tag, which is the same as `v1.x.y` (for production releases) + +We also want to standardise experimental image tags to `-experimental`, rather than `.experimental`. + +So for release 1.5.0, we might only publish these tags: +- `latest` +- `latest-experimental` (a new tag) +- `v1.5.0` +- `v1.5.0-experimental` + +Please let us know if you need any other tags by [opening a GitHub ticket](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-enhancement%2CS-needs-triage&projects=&template=feature_request.yml&title=feature%3A+). + +We recommend using the `latest` tag to always get the most recent Zebra release. + +#### Warning: Documentation Website URL Change + +We have replaced the API documentation on the [doc.zebra.zfnd.org](https://doc.zebra.zfnd.org) +website with [docs.rs](https://docs.rs/releases/search?query=zebra). All links have been updated. + +Zebra's API documentation can be found on: +- [`docs.rs`](https://docs.rs/releases/search?query=zebra), which renders documentation for the + public API of the latest crate releases; +- [`doc-internal.zebra.zfnd.org`](https://doc-internal.zebra.zfnd.org/), which renders + documentation for the internal API on the `main` branch. + +[doc.zebra.zfnd.org](https://doc.zebra.zfnd.org) stopped being updated a few days before this release, +and it will soon be shut down. + +### Significant Changes + +This release contains the following significant changes: + +#### Mining RPCs in Production Builds Zebra's mining RPCs are now available in release builds (#7740). Any Zebra instance can be used by a solo miner or mining pool. This stabilises 12 RPCs, including `getblocktemplate`, `submitblock`, @@ -23,7 +73,7 @@ read our [mining blog post](https://zfnd.org/experimental-mining-support-in-zebr Please [let us know](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-enhancement%2CS-needs-triage&projects=&template=feature_request.yml&title=feature%3A+) if your mining pool needs extra RPC methods or fields. 
-### Zcash Parameters in `zebrad` Binary +#### Zcash Parameters in `zebrad` Binary `zebrad` now bundles zk-SNARK parameters directly into its binary. This increases the binary size by a few megabytes, but reduces the size of the Docker image by around 600 MB because @@ -44,9 +94,44 @@ so it can't be used to retry failed downloads in `zebrad` 1.3.0 and earlier. We recommend upgrading to the latest Zebra release to avoid download issues in new installs. +#### macOS Support + +macOS x86_64 is now supported on a best-effort basis. macOS builds and some tests run in Zebra's CI. + ### Security -TODO: rest of changelog +- Reconnect with peers after brief network interruption ([#7853](https://github.com/ZcashFoundation/zebra/pull/7853)) +- Add outer timeouts for critical network operations to avoid hangs ([#7869](https://github.com/ZcashFoundation/zebra/pull/7869)) +- Set iterator read bounds where possible in DiskDb, to avoid a known RocksDB denial of service issue ([#7731](https://github.com/ZcashFoundation/zebra/pull/7731), [#7732](https://github.com/ZcashFoundation/zebra/pull/7732)) +- Fix concurrency issues in tree key formats, and CPU usage in genesis tree roots ([#7392](https://github.com/ZcashFoundation/zebra/pull/7392)) + +### Removed + +- Remove the `zebrad download` command, because it no longer does anything ([#7819](https://github.com/ZcashFoundation/zebra/pull/7819)) + +### Added + +- Enable mining RPCs by default in production builds ([#7740](https://github.com/ZcashFoundation/zebra/pull/7740)) +- Re-enable macOS builds and tests in CI ([#7834](https://github.com/ZcashFoundation/zebra/pull/7834)) +- Make macOS x86_64 a tier 2 supported platform in the docs ([#7843](https://github.com/ZcashFoundation/zebra/pull/7843)) +- Add macOS M1 as a tier 3 supported platform ([#7851](https://github.com/ZcashFoundation/zebra/pull/7851)) + +### Changed + +- Build Sprout and Sapling parameters into the zebrad binary, so a download server isn't needed ([#7800](https://github.com/ZcashFoundation/zebra/pull/7800), [#7844](https://github.com/ZcashFoundation/zebra/pull/7844)) +- Bump ECC dependencies for `zcashd` 5.7.0 ([#7784](https://github.com/ZcashFoundation/zebra/pull/7784)) +- Refactor the installation instructions for the `s-nomp` mining pool software ([#7835](https://github.com/ZcashFoundation/zebra/pull/7835)) + +### Fixed + +- Make the `latest` Docker tag point to the production build, rather than the build with experimental features ([#7817](https://github.com/ZcashFoundation/zebra/pull/7817)) +- Fix an incorrect consensus-critical ZIP 212 comment ([#7774](https://github.com/ZcashFoundation/zebra/pull/7774)) +- Fix broken links to `zebra_network` and `zebra_state` `Config` structs on doc-internal.zebra.zfnd.org ([#7838](https://github.com/ZcashFoundation/zebra/pull/7838)) + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@arya2, @gustavovalverde, @mpguerra, @oxarbitrage, @rex4539, @teor2345, @upbqdn, and @vuittont60. 
## [Zebra 1.3.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.3.0) - 2023-10-16 @@ -996,12 +1081,12 @@ When there are a lot of large user-generated transactions on the network, Zebra ### Configuration Changes -- Split the checkpoint and full verification [`sync` concurrency options](https://doc.zebra.zfnd.org/zebrad/config/struct.SyncSection.html) (#4726, #4758): +- Split the checkpoint and full verification [`sync` concurrency options](https://docs.rs/zebrad/latest/zebrad/components/sync/struct.Config.html) (#4726, #4758): - Add a new `full_verify_concurrency_limit` - Rename `max_concurrent_block_requests` to `download_concurrency_limit` - Rename `lookahead_limit` to `checkpoint_verify_concurrency_limit` For backwards compatibility, the old names are still accepted as aliases. -- Add a new `parallel_cpu_threads` [`sync` concurrency option](https://doc.zebra.zfnd.org/zebrad/config/struct.SyncSection.html) (#4776). +- Add a new `parallel_cpu_threads` [`sync` concurrency option](https://docs.rs/zebrad/latest/zebrad/components/sync/struct.Config.html) (#4776). This option sets the number of threads to use for CPU-bound tasks, such as proof and signature verification. By default, Zebra uses all available CPU cores. diff --git a/Cargo.lock b/Cargo.lock index 2d5cd0991d9..52b19e9e457 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -79,14 +79,15 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", "once_cell", "version_check", + "zerocopy", ] [[package]] @@ -343,9 +344,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.4" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" [[package]] name = "base64ct" @@ -927,9 +928,9 @@ checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fbc60abd742b35f2492f808e1abbb83d45f72db402e14c55057edc9c7b1e9e4" +checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" dependencies = [ "libc", ] @@ -1058,9 +1059,9 @@ dependencies = [ [[package]] name = "curve25519-dalek-derive" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", @@ -1081,9 +1082,9 @@ dependencies = [ [[package]] name = "cxx-gen" -version = "0.7.109" +version = "0.7.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1318697052dbc5a12f8e5e603441413d6096350c29b8361eb45a9c531be61dda" +checksum = "a3e0fc77e9f8d61724be90deb42a7e50ba3bf37c7c16dc91cdba821f69a5e0e9" dependencies = [ "codespan-reporting", "proc-macro2", @@ -1399,9 +1400,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.1" +version = "0.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" +checksum = "a481586acf778f1b1455424c343f71124b048ffa5f4fc3f8f6ae9dc432dcb3c7" [[package]] name = "fixed-hash" @@ -1769,7 +1770,7 @@ checksum = "5a03ba7d4c9ea41552cd4351965ff96883e629693ae85005c501bb4b9e1c48a7" dependencies = [ "lazy_static", "rand_core 0.6.4", - "ring", + "ring 0.16.20", "secp256k1", "thiserror", ] @@ -1926,7 +1927,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -1935,9 +1936,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http", @@ -2046,9 +2047,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" +checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", "hashbrown 0.14.2", @@ -2122,9 +2123,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is-terminal" @@ -2467,7 +2468,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "hyper", "indexmap 1.9.3", "ipnet", @@ -2527,9 +2528,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -2887,9 +2888,9 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c022f1e7b65d6a24c0dbbd5fb344c66881bc01f3e5ae74a1c8100f2f985d98a4" +checksum = "ae9cee2a55a544be8b89dc6848072af97a20f2422603c10865be2a42b580fff5" dependencies = [ "memchr", "thiserror", @@ -2898,9 +2899,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35513f630d46400a977c4cb58f78e1bfbe01434316e60c37d27b9ad6139c66d8" +checksum = "81d78524685f5ef2a3b3bd1cafbc9fcabb036253d9b1463e726a91cd16e2dfc2" dependencies = [ "pest", "pest_generator", @@ -2908,9 +2909,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc9fc1b9e7057baba189b5c626e2d6f40681ae5b6eb064dc7c7834101ec8123a" +checksum = "68bd1206e71118b5356dae5ddc61c8b11e28b09ef6a31acbd15ea48a28e0c227" dependencies = [ "pest", 
"pest_meta", @@ -2921,9 +2922,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df74e9e7ec4053ceb980e7c0c8bd3594e977fde1af91daba9c928e8e8c6708d" +checksum = "7c747191d4ad9e4a4ab9c8798f1e82a39affe7ef9648390b7e5548d18e099de6" dependencies = [ "once_cell", "pest", @@ -2937,7 +2938,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.0.2", + "indexmap 2.1.0", ] [[package]] @@ -3035,9 +3036,15 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.4.3" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31114a898e107c51bb1609ffaf55a0e011cf6a4d7f1170d0015a165082c0338b" +checksum = "3bccab0e7fd7cc19f820a1c8c91720af652d0c88dc9664dd72aef2614f04af3b" + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "powerfmt" @@ -3504,7 +3511,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ "async-compression", - "base64 0.21.4", + "base64 0.21.5", "bytes", "encoding_rs", "futures-core", @@ -3541,9 +3548,9 @@ dependencies = [ [[package]] name = "rgb" -version = "0.8.36" +version = "0.8.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20ec2d3e3fc7a92ced357df9cebd5a10b6fb2aa1ee797bf7e9ce2f17dffc8f59" +checksum = "05aaa8004b64fd573fc9d002f4e632d51ad4f026c2b5ba95fcb6c2f32c2c47d8" dependencies = [ "bytemuck", ] @@ -3558,11 +3565,25 @@ dependencies = [ "libc", "once_cell", "spin 0.5.2", - "untrusted", + "untrusted 0.7.1", "web-sys", "winapi", ] +[[package]] +name = "ring" +version = "0.17.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" +dependencies = [ + "cc", + "getrandom 0.2.10", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.48.0", +] + [[package]] name = "ripemd" version = "0.1.3" @@ -3653,12 +3674,12 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" dependencies = [ "log", - "ring", + "ring 0.17.5", "rustls-webpki", "sct", ] @@ -3669,17 +3690,17 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", ] [[package]] name = "rustls-webpki" -version = "0.101.6" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.5", + "untrusted 0.9.0", ] [[package]] @@ -3723,12 +3744,12 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring", - "untrusted", + "ring 0.17.5", + "untrusted 0.9.0", ] [[package]] @@ -3905,7 +3926,7 @@ version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "itoa", "ryu", "serde", @@ -3948,11 +3969,11 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64cd236ccc1b7a29e7e2739f27c0b2dd199804abc4290e32f59f3b68d6405c23" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.0.2", + "indexmap 2.1.0", "serde", "serde_json", "serde_with_macros 3.4.0", @@ -4065,9 +4086,9 @@ checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" [[package]] name = "socket2" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", @@ -4075,9 +4096,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys 0.48.0", @@ -4382,7 +4403,7 @@ dependencies = [ "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.4", + "socket2 0.5.5", "tokio-macros", "tracing", "windows-sys 0.48.0", @@ -4483,21 +4504,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b150d2f463da7b52f12110d3995dc86598bf90d535e929e5f5af15ab89155011" +checksum = "8ff9e3abce27ee2c9a37f9ad37238c1bdd4e789c84ba37df76aa4d528f5072cc" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.20.3", + "toml_edit 0.20.7", ] [[package]] name = "toml_datetime" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51cc078118ed25af325985ff674c00c8416b0f962be67da4946854ebfc99f334" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] @@ -4508,18 +4529,18 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "toml_datetime", "winnow", ] [[package]] name = "toml_edit" -version = "0.20.3" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a2534c1aa199edef7108fb7d970facaa17f8f8cc5ce6bde75372cfa1051ed91" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "serde", "serde_spanned", "toml_datetime", @@ -4535,7 +4556,7 @@ dependencies = [ "async-stream", "async-trait", "axum", - "base64 0.21.4", + "base64 0.21.5", "bytes", "h2", "http", @@ -4589,7 +4610,7 @@ dependencies = [ [[package]] name = 
"tower-batch-control" -version = "0.2.41-beta.6" +version = "0.2.41-beta.7" dependencies = [ "color-eyre", "ed25519-zebra", @@ -4613,7 +4634,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.41-beta.6" +version = "0.2.41-beta.7" dependencies = [ "futures-core", "pin-project", @@ -4737,12 +4758,12 @@ dependencies = [ [[package]] name = "tracing-log" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" dependencies = [ - "lazy_static", "log", + "once_cell", "tracing-core", ] @@ -4896,13 +4917,19 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "ureq" version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f5ccd538d4a604753ebc2f17cd9946e89b77bf87f6a8e2309667c6f2e87855e3" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "log", "once_cell", "rustls", @@ -4958,9 +4985,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "vergen" -version = "8.2.5" +version = "8.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e7dc29b3c54a2ea67ef4f953d5ec0c4085035c0ae2d325be1c0d2144bd9f16" +checksum = "1290fd64cc4e7d3c9b07d7f333ce0ce0007253e32870e632624835cc80b83939" dependencies = [ "anyhow", "git2", @@ -5339,9 +5366,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.17" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3b801d0e0a6726477cc207f60162da452f3a95adb368399bef20a946e06f65c" +checksum = "176b6138793677221d420fd2f0aeeced263f197688b36484660da767bca2fa32" dependencies = [ "memchr", ] @@ -5410,7 +5437,7 @@ version = "0.10.0-rc.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecc33f71747a93d509f7e1c047961e359a271bdf4869cc07f7f65ee1ba7df8c2" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "bech32", "bls12_381", "bs58", @@ -5567,7 +5594,7 @@ dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" dependencies = [ "bitflags 2.4.1", "bitflags-serde-legacy", @@ -5628,7 +5655,7 @@ dependencies = [ [[package]] name = "zebra-consensus" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" dependencies = [ "bellman", "blake2b_simd", @@ -5674,7 +5701,7 @@ dependencies = [ [[package]] name = "zebra-network" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" dependencies = [ "bitflags 2.4.1", "byteorder", @@ -5685,7 +5712,7 @@ dependencies = [ "hex", "howudoin", "humantime-serde", - "indexmap 2.0.2", + "indexmap 2.1.0", "itertools 0.11.0", "lazy_static", "metrics", @@ -5704,7 +5731,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util 0.7.10", - "toml 0.8.3", + "toml 0.8.6", "tower", "tracing", "tracing-error", @@ -5715,7 +5742,7 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" dependencies = [ "color-eyre", "jsonrpc-core", @@ -5727,13 +5754,13 @@ 
dependencies = [ [[package]] name = "zebra-rpc" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" dependencies = [ "chrono", "futures", "hex", "hyper", - "indexmap 2.0.2", + "indexmap 2.1.0", "insta", "jsonrpc-core", "jsonrpc-derive", @@ -5778,7 +5805,7 @@ dependencies = [ [[package]] name = "zebra-script" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" dependencies = [ "displaydoc", "hex", @@ -5791,7 +5818,7 @@ dependencies = [ [[package]] name = "zebra-state" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" dependencies = [ "bincode", "chrono", @@ -5804,7 +5831,7 @@ dependencies = [ "hex-literal", "howudoin", "humantime-serde", - "indexmap 2.0.2", + "indexmap 2.1.0", "insta", "itertools 0.11.0", "jubjub", @@ -5835,13 +5862,13 @@ dependencies = [ [[package]] name = "zebra-test" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" dependencies = [ "color-eyre", "futures", "hex", "humantime", - "indexmap 2.0.2", + "indexmap 2.1.0", "insta", "itertools 0.11.0", "lazy_static", @@ -5863,7 +5890,7 @@ dependencies = [ [[package]] name = "zebra-utils" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" dependencies = [ "color-eyre", "hex", @@ -5884,7 +5911,7 @@ dependencies = [ [[package]] name = "zebrad" -version = "1.3.0" +version = "1.4.0" dependencies = [ "abscissa_core", "atty", @@ -5899,7 +5926,7 @@ dependencies = [ "howudoin", "humantime-serde", "hyper", - "indexmap 2.0.2", + "indexmap 2.1.0", "indicatif", "inferno", "insta", @@ -5926,7 +5953,7 @@ dependencies = [ "tinyvec", "tokio", "tokio-stream", - "toml 0.8.3", + "toml 0.8.6", "tonic", "tonic-build", "tower", @@ -5949,6 +5976,26 @@ dependencies = [ "zebra-utils", ] +[[package]] +name = "zerocopy" +version = "0.7.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686b7e407015242119c33dab17b8f61ba6843534de936d94368856528eae4dcc" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "020f3dfe25dfc38dfea49ce62d5d45ecdd7f0d8a724fa63eb36b6eba4ec76806" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.38", +] + [[package]] name = "zeroize" version = "1.6.0" diff --git a/Cargo.toml b/Cargo.toml index 4c8b5f651e1..e2c5e03372a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,6 +18,13 @@ members = [ # Use the edition 2021 dependency resolver in the workspace, to match the crates resolver = "2" +# `cargo release` settings + +[workspace.metadata.release] + +# We always do releases from the main branch +allow-branch = ["main"] + # Compilation settings [profile.dev] diff --git a/README.md b/README.md index 36d4c6d7d02..177df3fc3c5 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ consensus-compatible implementation of a Zcash node. Zebra's network stack is interoperable with `zcashd`, and Zebra implements all the features required to reach Zcash network consensus, including the validation of all the consensus rules for the NU5 network upgrade. -[Here](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-advantages) are some +[Here](https://docs.rs/zebrad/latest/zebrad/index.html#zebra-advantages) are some benefits of Zebra. Zebra validates blocks and transactions, but needs extra software to generate @@ -148,8 +148,7 @@ You can combine multiple features by listing them as parameters of the `--featur cargo install --features=" ..." ... 
``` -Our full list of experimental and developer features is in [the API -documentation](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-feature-flags). +Our full list of experimental and developer features is in [the API documentation](https://docs.rs/zebrad/latest/zebrad/index.html#zebra-feature-flags). Some debugging and monitoring features are disabled in release builds to increase performance. @@ -158,6 +157,8 @@ performance. There are a few bugs in Zebra that we're still working on fixing: +- [The `getpeerinfo` RPC shows current and recent outbound connections](https://github.com/ZcashFoundation/zebra/issues/7893), rather than current inbound and outbound connections. + - [Progress bar estimates can become extremely large](https://github.com/console-rs/indicatif/issues/556). We're waiting on a fix in the progress bar library. - Zebra currently gossips and connects to [private IP addresses](https://en.wikipedia.org/wiki/IP_address#Private_addresses), we want to [disable private IPs but provide a config (#3117)](https://github.com/ZcashFoundation/zebra/issues/3117) in an upcoming release @@ -176,10 +177,12 @@ We will continue to add new features as part of future network upgrades, and in The [Zebra website](https://zebra.zfnd.org/) contains user documentation, such as how to run or configure Zebra, set up metrics integrations, etc., as well as -developer documentation, such as design documents. We also render [API -documentation](https://doc.zebra.zfnd.org) for the external API of our crates, -as well as [internal documentation](https://doc-internal.zebra.zfnd.org) for -private APIs. +developer documentation, such as design documents. It also renders +[internal documentation](https://doc-internal.zebra.zfnd.org) for private APIs +on the `main` branch. + +`docs.rs` renders [API documentation](https://docs.rs/releases/search?query=zebra) +for the external API of the latest releases of our crates. ## User support diff --git a/book/src/api.md b/book/src/api.md index c24898b82a2..04e064b406e 100644 --- a/book/src/api.md +++ b/book/src/api.md @@ -2,5 +2,7 @@ Zebra's API documentation is generated using Rustdoc: -- [`doc.zebra.zfnd.org`](https://doc.zebra.zfnd.org/) renders documentation for the public API; -- [`doc-internal.zebra.zfnd.org`](https://doc-internal.zebra.zfnd.org/) renders documentation for the internal API. \ No newline at end of file +- [`docs.rs`](https://docs.rs/releases/search?query=zebra) renders documentation for the public API + of the latest crate releases; +- [`doc-internal.zebra.zfnd.org`](https://doc-internal.zebra.zfnd.org/) renders documentation for + the internal API on the `main` branch. diff --git a/book/src/dev.md b/book/src/dev.md index 7e7341a1cda..2148325aaf0 100644 --- a/book/src/dev.md +++ b/book/src/dev.md @@ -3,5 +3,7 @@ This section contains the contribution guide and design documentation. It does not contain API documentation, which is generated using Rustdoc: -- [`doc.zebra.zfnd.org`](https://doc.zebra.zfnd.org/) renders documentation for the public API; -- [`doc-internal.zebra.zfnd.org`](https://doc-internal.zebra.zfnd.org/) renders documentation for the internal API. +- [`docs.rs`](https://docs.rs/releases/search?query=zebra) renders documentation for the public API + of the latest crate releases; +- [`doc-internal.zebra.zfnd.org`](https://doc-internal.zebra.zfnd.org/) renders documentation for + the internal API on the `main` branch. 
diff --git a/book/src/dev/continuous-integration.md b/book/src/dev/continuous-integration.md index 4310f904013..a9771f6f61a 100644 --- a/book/src/dev/continuous-integration.md +++ b/book/src/dev/continuous-integration.md @@ -33,14 +33,18 @@ We try to use Mergify as much as we can, so all PRs get consistent checks. Some PRs don't use Mergify: - Mergify config updates - Admin merges, which happen when there are multiple failures on the `main` branch - (these are disabled by our branch protection rules, but admins can remove the "don't allow bypassing these rules" setting) -- Manual merges (these are usually disabled by our branch protection rules) +- Manual merges (these are allowed by our branch protection rules, but we almost always use Mergify) + +Merging with failing CI is usually disabled by our branch protection rules. +See the `Admin: Manually Merging PRs` section below for manual merge instructions. We use workflow conditions to skip some checks on PRs, Mergify, or the `main` branch. For example, some workflow changes skip Rust code checks. When a workflow can skip a check, we need to create [a patch workflow](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/collaborating-on-repositories-with-code-quality-features/troubleshooting-required-status-checks#handling-skipped-but-required-checks) with an empty job with the same name. This is a [known Actions issue](https://github.com/orgs/community/discussions/13690#discussioncomment-6653382). This lets the branch protection rules pass when the job is skipped. In Zebra, we name these workflows with the extension `.patch.yml`. +### Branch Protection Rules + Branch protection rules should be added for every failure that should stop a PR merging, break a release, or cause problems for Zebra users. We also add branch protection rules for developer or devops features that we need to keep working, like coverage. @@ -55,6 +59,60 @@ When a new job is added in a PR, use the `#devops` Slack channel to ask a GitHub Adding a new Zebra crate automatically adds a new job to build that crate by itself in [ci-build-crates.yml](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/ci-build-crates.yml), so new crate PRs also need to add a branch protection rule. +#### Admin: Changing Branch Protection Rules + +[Zebra repository admins](https://github.com/orgs/ZcashFoundation/teams/zebra-admins) and +[Zcash Foundation organisation owners](https://github.com/orgs/ZcashFoundation/people?query=role%3Aowner) +can add or delete branch protection rules in the Zebra repository. + +To change branch protection rules: + +Any developer: + +0. Run a PR containing the new rule, so its name is available to autocomplete. +1. If the job doesn't run on all PRs, add a patch job with the name of the job. + If the job calls a reusable workflow, the name is `Caller job / Reusable step`. + (The name of the job inside the reusable workflow is ignored.) + +Admin: + +2. Go to the [branch protection rule settings](https://github.com/ZcashFoundation/zebra/settings/branches) +3. Click on `Edit` for the `main` branch +4. Scroll down to the `Require status checks to pass before merging` section. + (This section must always be enabled. If it is disabled, all the rules get deleted.) + +To add jobs: + +5. Start typing the name of the job or step in the search box +6. Select the name of the job or step to add it + +To remove jobs: + +7. Go to `Status checks that are required.` +8. 
Find the job name, and click the cross on the right to remove it + +And finally: + +9. Click `Save changes`, using your security key if needed + +If you accidentally delete a lot of rules, and you can't remember what they were, ask a +ZF organisation owner to send you a copy of the rules from the [audit log](https://github.com/organizations/ZcashFoundation/settings/audit-log). + +Organisation owners can also monitor rule changes and other security settings using this log. + +#### Admin: Manually Merging PRs + +Admins can allow merges with failing CI, to fix CI when multiple issues are causing failures. + +Admin: +1. Follow steps 2 and 3 above to open the `main` branch protection rule settings +2. Scroll down to `Do not allow bypassing the above settings` +3. Uncheck it +4. Click `Save changes` +5. Do the manual merge, and put an explanation on the PR +6. Re-open the branch protection rule settings, and re-enable `Do not allow bypassing the above settings` + + ### Pull Requests from Forked Repositories GitHub doesn't allow PRs from forked repositories to have access to our repository secret keys, even after we approve their CI. @@ -64,7 +122,7 @@ Until we [fix this CI bug](https://github.com/ZcashFoundation/zebra/issues/4529) 1. Reviewing the code to make sure it won't give our secret keys to anyone 2. Pushing a copy of the branch to the Zebra repository 3. Opening a PR using that branch -4. Closing the original PR with a note that it will be merged (this is reauired by Mergify) +4. Closing the original PR with a note that it will be merged (closing duplicate PRs is required by Mergify) 5. Asking another Zebra developer to approve the new PR ## Manual Testing Using Google Cloud diff --git a/book/src/dev/overview.md b/book/src/dev/overview.md index 2142be8b0fc..afefbd33403 100644 --- a/book/src/dev/overview.md +++ b/book/src/dev/overview.md @@ -86,7 +86,7 @@ other functionality, without requiring a full node. At a high level, the fullnode functionality required by `zebrad` is factored into several components: -- [`zebra-chain`](https://doc.zebra.zfnd.org/zebra_chain/index.html), providing +- [`zebra-chain`](https://docs.rs/zebra_chain), providing definitions of core data structures for Zcash, such as blocks, transactions, addresses, etc., and related functionality. It also contains the implementation of the consensus-critical serialization formats used in Zcash. @@ -100,7 +100,7 @@ into several components: towards verifying transactions, but will be extended to support creating them in the future. -- [`zebra-network`](https://doc.zebra.zfnd.org/zebra_network/index.html), +- [`zebra-network`](https://docs.rs/zebra_network), providing an asynchronous, multithreaded implementation of the Zcash network protocol inherited from Bitcoin. In contrast to `zcashd`, each peer connection has a separate state machine, and the crate translates the @@ -113,21 +113,21 @@ into several components: isolated from all other node state. This can be used, for instance, to safely relay data over Tor, without revealing distinguishing information. -- [`zebra-script`](https://doc.zebra.zfnd.org/zebra_script/index.html) provides +- [`zebra-script`](https://docs.rs/zebra_script) provides script validation. Currently, this is implemented by linking to the C++ script verification code from `zcashd`, but in the future we may implement a pure-Rust script implementation. 
-- [`zebra-consensus`](https://doc.zebra.zfnd.org/zebra_consensus/index.html) +- [`zebra-consensus`](https://docs.rs/zebra_consensus) performs [*semantic validation*](https://zebra.zfnd.org/dev/rfcs/0002-parallel-verification.html#verification-stages) of blocks and transactions: all consensus rules that can be checked independently of the chain state, such as verification of signatures, proofs, and scripts. Internally, the library - uses [`tower-batch-control`](https://doc.zebra.zfnd.org/tower_batch_control/index.html) to + uses [`tower-batch-control`](https://docs.rs/tower_batch_control) to perform automatic, transparent batch processing of contemporaneous verification requests. -- [`zebra-state`](https://doc.zebra.zfnd.org/zebra_state/index.html) is +- [`zebra-state`](https://docs.rs/zebra_state) is responsible for storing, updating, and querying the chain state. The state service is responsible for [*contextual verification*](https://zebra.zfnd.org/dev/rfcs/0002-parallel-verification.html#verification-stages): all consensus rules @@ -135,7 +135,7 @@ into several components: such as updating the nullifier set or checking that transaction inputs remain unspent. -- [`zebrad`](https://doc.zebra.zfnd.org/zebrad/index.html) contains the full +- [`zebrad`](https://docs.rs/zebrad) contains the full node, which connects these components together and implements logic to handle inbound requests from peers and the chain sync process. diff --git a/book/src/dev/rfcs/0004-asynchronous-script-verification.md b/book/src/dev/rfcs/0004-asynchronous-script-verification.md index 23800002119..439424044f9 100644 --- a/book/src/dev/rfcs/0004-asynchronous-script-verification.md +++ b/book/src/dev/rfcs/0004-asynchronous-script-verification.md @@ -31,11 +31,11 @@ and in parallel on a thread pool. - unlock script: a script satisfying the conditions of the lock script, allowing a UTXO to be spent. Stored in the [`transparent::Input::PrevOut::lock_script`][lock_script] field. -[transout]: https://doc.zebra.zfnd.org/zebra_chain/transparent/struct.Output.html -[outpoint]: https://doc.zebra.zfnd.org/zebra_chain/transparent/struct.OutPoint.html -[lock_script]: https://doc.zebra.zfnd.org/zebra_chain/transparent/struct.Output.html#structfield.lock_script -[transin]: https://doc.zebra.zfnd.org/zebra_chain/transparent/enum.Input.html -[unlock_script]: https://doc.zebra.zfnd.org/zebra_chain/transparent/enum.Input.html#variant.PrevOut.field.unlock_script +[transout]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/struct.Output.html +[outpoint]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/struct.OutPoint.html +[lock_script]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/struct.Output.html#structfield.lock_script +[transin]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/enum.Input.html +[unlock_script]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/enum.Input.html#variant.PrevOut.field.unlock_script # Guide-level explanation diff --git a/book/src/dev/rfcs/0005-state-updates.md b/book/src/dev/rfcs/0005-state-updates.md index 7767975fd11..41bae0a07f6 100644 --- a/book/src/dev/rfcs/0005-state-updates.md +++ b/book/src/dev/rfcs/0005-state-updates.md @@ -713,9 +713,9 @@ validation and the anchor calculations.) Hypothetically, if Sapling were activated from genesis, the specification requires a Sapling anchor, but `zcashd` would ignore that anchor. 
-[`JoinSplit`]: https://doc.zebra.zfnd.org/zebra_chain/sprout/struct.JoinSplit.html -[`Spend`]: https://doc.zebra.zfnd.org/zebra_chain/sapling/spend/struct.Spend.html -[`Action`]: https://doc.zebra.zfnd.org/zebra_chain/orchard/struct.Action.html +[`JoinSplit`]: https://doc-internal.zebra.zfnd.org/zebra_chain/sprout/struct.JoinSplit.html +[`Spend`]: https://doc-internal.zebra.zfnd.org/zebra_chain/sapling/spend/struct.Spend.html +[`Action`]: https://doc-internal.zebra.zfnd.org/zebra_chain/orchard/struct.Action.html These updates can be performed in a batch or without necessarily iterating over all transactions, if the data is available by other means; they're diff --git a/book/src/user/docker.md b/book/src/user/docker.md index 439cefd27fe..0d37b7a847e 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -17,7 +17,7 @@ docker run --detach zfnd/zebra:latest ### Build it locally ```shell -git clone --depth 1 --branch v1.3.0 https://github.com/ZcashFoundation/zebra.git +git clone --depth 1 --branch v1.4.0 https://github.com/ZcashFoundation/zebra.git docker build --file docker/Dockerfile --target runtime --tag zebra:local . docker run --detach zebra:local ``` diff --git a/book/src/user/install.md b/book/src/user/install.md index 2a03398d6e4..83e14c87254 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -20,7 +20,7 @@ To compile Zebra directly from GitHub, or from a GitHub release source archive: ```sh git clone https://github.com/ZcashFoundation/zebra.git cd zebra -git checkout v1.3.0 +git checkout v1.4.0 ``` 3. Build and Run `zebrad` @@ -33,7 +33,7 @@ target/release/zebrad start ### Compiling from git using cargo install ```sh -cargo install --git https://github.com/ZcashFoundation/zebra --tag v1.3.0 zebrad +cargo install --git https://github.com/ZcashFoundation/zebra --tag v1.4.0 zebrad ``` ### Compiling on ARM diff --git a/book/src/user/lightwalletd.md b/book/src/user/lightwalletd.md index 5b7645e51fc..230bb879e34 100644 --- a/book/src/user/lightwalletd.md +++ b/book/src/user/lightwalletd.md @@ -55,7 +55,7 @@ parallel_cpu_threads = 0 ``` **WARNING:** This config allows multiple Zebra instances to share the same RPC port. -See the [RPC config documentation](https://doc.zebra.zfnd.org/zebra_rpc/config/struct.Config.html) for details. +See the [RPC config documentation](https://docs.rs/zebra_rpc/latest/zebra_rpc/config/struct.Config.html) for details. ## Sync Zebra @@ -71,7 +71,7 @@ Zebra will display information about sync process: ```console ... -zebrad::commands::start: estimated progress to chain tip sync_percent=10.783 % +zebrad::commands::start: estimated progress to chain tip sync_percent=10.783 % ... ``` @@ -79,13 +79,13 @@ Until eventually it will get there: ```console ... -zebrad::commands::start: finished initial sync to chain tip, using gossiped blocks sync_percent=100.000 % +zebrad::commands::start: finished initial sync to chain tip, using gossiped blocks sync_percent=100.000 % ... ``` You can interrupt the process at any time with `ctrl-c` and Zebra will resume the next time at around the block you were downloading when stopping the process. -When deploying for production infrastructure, the above command can/should be implemented as a server service or similar configuration. +When deploying for production infrastructure, the above command can be run as a service or daemon. For implementing zebra as a service please see [here](https://github.com/ZcashFoundation/zebra/blob/main/zebrad/systemd/zebrad.service). 
@@ -144,7 +144,7 @@ Wait until lightwalletd is in sync before connecting any wallet into it. You wil ## Run tests [#run-tests]: (#run-tests) -The Zebra team created tests for the interaction of `zebrad` and `lightwalletd`. +The Zebra team created tests for the interaction of `zebrad` and `lightwalletd`. To run all the Zebra `lightwalletd` tests: 1. install `lightwalletd` @@ -156,7 +156,7 @@ Please refer to [acceptance](https://github.com/ZcashFoundation/zebra/blob/main/ ## Connect a wallet to lightwalletd [#connect-wallet-to-lightwalletd]: (#connect-wallet-to-lightwalletd) -The final goal is to connect wallets to the lightwalletd service backed by Zebra. +The final goal is to connect wallets to the lightwalletd service backed by Zebra. For demo purposes we used [zecwallet-cli](https://github.com/adityapk00/zecwallet-light-cli) with the [adityapk00/lightwalletd](https://github.com/adityapk00/lightwalletd) fork. We didn't test [zecwallet-cli](https://github.com/adityapk00/zecwallet-light-cli) with [zcash/lightwalletd](https://github.com/zcash/lightwalletd) yet. @@ -184,5 +184,5 @@ Lightclient connecting to http://127.0.0.1:9067/ "total_blocks_synced": 49476 } Ready! -(main) Block:1683911 (type 'help') >> +(main) Block:1683911 (type 'help') >> ``` diff --git a/book/src/user/metrics.md b/book/src/user/metrics.md index 030f67c1df0..474ec619c2c 100644 --- a/book/src/user/metrics.md +++ b/book/src/user/metrics.md @@ -56,4 +56,4 @@ front end that you can visualize: ![image info](grafana.png) -[metrics_section]: https://doc.zebra.zfnd.org/zebrad/config/struct.MetricsSection.html +[metrics_section]: https://docs.rs/zebrad/latest/zebrad/components/metrics/struct.Config.html diff --git a/book/src/user/requirements.md b/book/src/user/requirements.md index af6b669e69e..df95aa139ba 100644 --- a/book/src/user/requirements.md +++ b/book/src/user/requirements.md @@ -31,7 +31,7 @@ Zebra uses the following inbound and outbound TCP ports: - 18233 on Testnet If you configure Zebra with a specific -[`listen_addr`](https://doc.zebra.zfnd.org/zebra_network/struct.Config.html#structfield.listen_addr), +[`listen_addr`](https://docs.rs/zebra_network/latest/zebra_network/struct.Config.html#structfield.listen_addr), it will advertise this address to other nodes for inbound connections. Outbound connections are required to sync, inbound connections are optional. Zebra also needs access to the Zcash DNS seeders, via the OS DNS resolver (usually port diff --git a/book/src/user/run.md b/book/src/user/run.md index 8d383db60f6..ef2a1cd80b3 100644 --- a/book/src/user/run.md +++ b/book/src/user/run.md @@ -7,7 +7,7 @@ changing the config. The configuration format is the TOML encoding of the internal config structure, and documentation for all of the config options can be found -[here](https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html). +[here](https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html). - `zebrad start` starts a full node. diff --git a/book/src/user/startup.md b/book/src/user/startup.md index ffb41dbe7f0..f1f7013f354 100644 --- a/book/src/user/startup.md +++ b/book/src/user/startup.md @@ -27,7 +27,7 @@ Some important parts of the config are: - `state.cache_dir`: where the cached state is stored on disk - `rpc.listen_addr`: optional JSON-RPC listener port -See [the full list of configuration options](https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html). +See [the full list of configuration options](https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html). 
``` zebrad::commands::start: Starting zebrad diff --git a/book/src/user/tracing.md b/book/src/user/tracing.md index 9d502abff8c..d1b984f05df 100644 --- a/book/src/user/tracing.md +++ b/book/src/user/tracing.md @@ -36,10 +36,10 @@ and the [`flamegraph`][flamegraph] runtime config option. Compile Zebra with `--features sentry` to monitor it using [Sentry][sentry] in production. -[tracing_section]: https://doc.zebra.zfnd.org/zebrad/components/tracing/struct.Config.html -[filter]: https://doc.zebra.zfnd.org/zebrad/components/tracing/struct.Config.html#structfield.filter -[flamegraph]: https://doc.zebra.zfnd.org/zebrad/components/tracing/struct.Config.html#structfield.flamegraph +[tracing_section]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.Config.html +[filter]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.Config.html#structfield.filter +[flamegraph]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.Config.html#structfield.flamegraph [flamegraphs]: http://www.brendangregg.com/flamegraphs.html [systemd_journald]: https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html -[use_journald]: https://doc.zebra.zfnd.org/zebrad/components/tracing/struct.Config.html#structfield.use_journald +[use_journald]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.Config.html#structfield.use_journald [sentry]: https://sentry.io/welcome/ diff --git a/book/src/user/troubleshooting.md b/book/src/user/troubleshooting.md index b1a89d43576..f29a044600d 100644 --- a/book/src/user/troubleshooting.md +++ b/book/src/user/troubleshooting.md @@ -48,7 +48,7 @@ Make sure you're using a release build on your native architecture. ### Syncer Lookahead Limit If your connection is slow, try -[downloading fewer blocks at a time](https://doc.zebra.zfnd.org/zebrad/config/struct.SyncSection.html#structfield.lookahead_limit): +[downloading fewer blocks at a time](https://docs.rs/zebrad/latest/zebrad/components/sync/struct.Config.html#structfield.lookahead_limit): ```toml [sync] @@ -58,7 +58,7 @@ max_concurrent_block_requests = 25 ### Peer Set Size -If your connection is slow, try [connecting to fewer peers](https://doc.zebra.zfnd.org/zebra_network/struct.Config.html#structfield.peerset_initial_target_size): +If your connection is slow, try [connecting to fewer peers](https://docs.rs/zebra-network/latest/zebra_network/struct.Config.html#structfield.peerset_initial_target_size): ```toml [network] diff --git a/deny.toml b/deny.toml index 80f10d1ae1c..2c37fbfc401 100644 --- a/deny.toml +++ b/deny.toml @@ -80,7 +80,8 @@ skip-tree = [ # ECC crates - # (no duplicate dependencies) + # wait for hdwallet to upgrade + { name = "ring", version = "=0.16.20" }, # zebra-utils dependencies diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 2b721ecd5c5..49ad0297453 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-batch-control" -version = "0.2.41-beta.6" +version = "0.2.41-beta.7" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Tower middleware for batch request processing" # # Legal diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 4aa8a698d6a..206152e2aeb 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-fallback" -version = "0.2.41-beta.6" +version = "0.2.41-beta.7" authors = ["Zcash Foundation "] description = "A Tower service combinator that sends requests 
to a first service, then retries processing on a second fallback service if the first service errors." license = "MIT OR Apache-2.0" diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index da659b9a2da..0f5bf1d9a62 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-chain" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" authors = ["Zcash Foundation "] description = "Core Zcash data structures" license = "MIT OR Apache-2.0" @@ -126,7 +126,7 @@ proptest-derive = { version = "0.4.0", optional = true } rand = { version = "0.8.5", optional = true } rand_chacha = { version = "0.3.1", optional = true } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.30", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.31", optional = true } [dev-dependencies] # Benchmarks diff --git a/zebra-chain/src/lib.rs b/zebra-chain/src/lib.rs index 212accf3e71..d1a06de044f 100644 --- a/zebra-chain/src/lib.rs +++ b/zebra-chain/src/lib.rs @@ -5,7 +5,7 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_chain")] +#![doc(html_root_url = "https://docs.rs/zebra_chain")] // Required by bitvec! macro #![recursion_limit = "256"] // diff --git a/zebra-chain/src/orchard/tree.rs b/zebra-chain/src/orchard/tree.rs index 3b5cb768107..9da1b00ab35 100644 --- a/zebra-chain/src/orchard/tree.rs +++ b/zebra-chain/src/orchard/tree.rs @@ -325,6 +325,10 @@ pub enum NoteCommitmentTreeError { } /// Orchard Incremental Note Commitment Tree +/// +/// Note that the default value of the [`Root`] type is `[0, 0, 0, 0]`. However, this value differs +/// from the default value of the root of the default tree which is the hash of the root's child +/// nodes. The default tree is the empty tree which has all leaves empty. #[derive(Debug, Serialize, Deserialize)] #[serde(into = "LegacyNoteCommitmentTree")] #[serde(from = "LegacyNoteCommitmentTree")] diff --git a/zebra-chain/src/parallel/tree.rs b/zebra-chain/src/parallel/tree.rs index 88eb6a3b94d..4f35dd44617 100644 --- a/zebra-chain/src/parallel/tree.rs +++ b/zebra-chain/src/parallel/tree.rs @@ -11,7 +11,9 @@ use crate::{ }; /// An argument wrapper struct for note commitment trees. -#[derive(Clone, Debug)] +/// +/// The default instance represents the trees and subtrees that correspond to the genesis block. +#[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct NoteCommitmentTrees { /// The sprout note commitment tree. pub sprout: Arc, diff --git a/zebra-chain/src/sapling/tree.rs b/zebra-chain/src/sapling/tree.rs index bef003243c2..c1c15bb5dc0 100644 --- a/zebra-chain/src/sapling/tree.rs +++ b/zebra-chain/src/sapling/tree.rs @@ -305,6 +305,10 @@ pub enum NoteCommitmentTreeError { } /// Sapling Incremental Note Commitment Tree. +/// +/// Note that the default value of the [`Root`] type is `[0, 0, 0, 0]`. However, this value differs +/// from the default value of the root of the default tree which is the hash of the root's child +/// nodes. The default tree is the empty tree which has all leaves empty. 
#[derive(Debug, Serialize, Deserialize)] #[serde(into = "LegacyNoteCommitmentTree")] #[serde(from = "LegacyNoteCommitmentTree")] diff --git a/zebra-chain/src/sprout/tree.rs b/zebra-chain/src/sprout/tree.rs index 5c92692832a..3807e2fa31e 100644 --- a/zebra-chain/src/sprout/tree.rs +++ b/zebra-chain/src/sprout/tree.rs @@ -206,6 +206,10 @@ pub enum NoteCommitmentTreeError { /// Internally this wraps [`bridgetree::Frontier`], so that we can maintain and increment /// the full tree with only the minimal amount of non-empty nodes/leaves required. /// +/// Note that the default value of the [`Root`] type is `[0, 0, 0, 0]`. However, this value differs +/// from the default value of the root of the default tree (which is the empty tree) since it is the +/// pair-wise root-hash of the tree's empty leaves at the tree's root level. +/// /// [Sprout Note Commitment Tree]: https://zips.z.cash/protocol/protocol.pdf#merkletree /// [nullifier set]: https://zips.z.cash/protocol/protocol.pdf#nullifierset #[derive(Debug, Serialize, Deserialize)] diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 9e86f848249..dc336ba0e27 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-consensus" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" authors = ["Zcash Foundation "] description = "Implementation of Zcash consensus checks" license = "MIT OR Apache-2.0" @@ -63,13 +63,13 @@ orchard = "0.6.0" zcash_proofs = { version = "0.13.0-rc.1", features = ["multicore" ] } wagyu-zcash-parameters = "0.2.0" -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.6" } -tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.6" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.7" } +tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.7" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.30" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.30" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.30" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.31" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.31" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.31" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.31" } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } diff --git a/zebra-consensus/src/lib.rs b/zebra-consensus/src/lib.rs index f864eac60ed..a6c092e7b37 100644 --- a/zebra-consensus/src/lib.rs +++ b/zebra-consensus/src/lib.rs @@ -32,7 +32,7 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_consensus")] +#![doc(html_root_url = "https://docs.rs/zebra_consensus")] // // Rust 1.72 has a false positive when nested generics are used inside Arc. // This makes the `arc_with_non_send_sync` lint trigger on a lot of proptest code. 
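Side note on the note commitment tree doc comments added above (sprout, sapling, and orchard trees): they distinguish the all-zero `Default` value of the `Root` type from the root of the default (empty) tree, which is computed by hashing the root's empty child nodes. The following is a minimal illustrative sketch of that distinction, not part of this patch; it assumes the public `zebra_chain::orchard::tree::{NoteCommitmentTree, Root}` paths and the `Default`, `Debug`, and `PartialEq` impls implied by those doc comments.

```rust
// Sketch only: the all-zero `Root` default is not the root hash of the
// default (empty) orchard note commitment tree.
use zebra_chain::orchard::tree::{NoteCommitmentTree, Root};

#[test]
fn default_root_differs_from_empty_tree_root() {
    // `Root::default()` is the all-zero value described in the new doc comments.
    let zero_root = Root::default();

    // The default tree is the empty tree; its root hashes the empty child nodes.
    let empty_tree_root = NoteCommitmentTree::default().root();

    assert_ne!(zero_root, empty_tree_root);
}
```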
diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 1ebe4633dd4..a55f66f61a6 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-network" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Networking code for Zebra" # # Legal @@ -83,7 +83,7 @@ howudoin = { version = "0.1.2", optional = true } proptest = { version = "1.3.1", optional = true } proptest-derive = { version = "0.4.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.31", features = ["async-error"] } [dev-dependencies] proptest = "1.3.1" diff --git a/zebra-network/src/address_book/tests/vectors.rs b/zebra-network/src/address_book/tests/vectors.rs index e401e6a5de3..16a9544429c 100644 --- a/zebra-network/src/address_book/tests/vectors.rs +++ b/zebra-network/src/address_book/tests/vectors.rs @@ -128,9 +128,7 @@ fn address_book_peer_order() { fn reconnection_peers_skips_recently_updated_ip() { // tests that reconnection_peers() skips addresses where there's a connection at that IP with a recent: // - `last_response` - test_reconnection_peers_skips_recently_updated_ip(true, |addr| { - MetaAddr::new_responded(addr, &PeerServices::NODE_NETWORK) - }); + test_reconnection_peers_skips_recently_updated_ip(true, MetaAddr::new_responded); // tests that reconnection_peers() *does not* skip addresses where there's a connection at that IP with a recent: // - `last_attempt` diff --git a/zebra-network/src/address_book_updater.rs b/zebra-network/src/address_book_updater.rs index ef503bc8d82..f0729b3efb0 100644 --- a/zebra-network/src/address_book_updater.rs +++ b/zebra-network/src/address_book_updater.rs @@ -1,6 +1,6 @@ //! The timestamp collector collects liveness information from peers. -use std::{net::SocketAddr, sync::Arc}; +use std::{cmp::max, net::SocketAddr, sync::Arc}; use thiserror::Error; use tokio::{ @@ -13,6 +13,9 @@ use crate::{ address_book::AddressMetrics, meta_addr::MetaAddrChange, AddressBook, BoxError, Config, }; +/// The minimum size of the address book updater channel. +pub const MIN_CHANNEL_SIZE: usize = 10; + /// The `AddressBookUpdater` hooks into incoming message streams for each peer /// and lets the owner of the sender handle update the address book. For /// example, it can be used to record per-connection last-seen timestamps, or @@ -46,7 +49,10 @@ impl AddressBookUpdater { ) { // Create an mpsc channel for peerset address book updates, // based on the maximum number of inbound and outbound peers. - let (worker_tx, mut worker_rx) = mpsc::channel(config.peerset_total_connection_limit()); + let (worker_tx, mut worker_rx) = mpsc::channel(max( + config.peerset_total_connection_limit(), + MIN_CHANNEL_SIZE, + )); let address_book = AddressBook::new( local_listener, diff --git a/zebra-network/src/lib.rs b/zebra-network/src/lib.rs index 2ca0177b8c8..309277fd0ee 100644 --- a/zebra-network/src/lib.rs +++ b/zebra-network/src/lib.rs @@ -131,7 +131,7 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_network")] +#![doc(html_root_url = "https://docs.rs/zebra_network")] // // Rust 1.72 has a false positive when nested generics are used inside Arc. 
// This makes the `arc_with_non_send_sync` lint trigger on a lot of proptest code. diff --git a/zebra-network/src/meta_addr.rs b/zebra-network/src/meta_addr.rs index 1f8572fd53c..98d3f32224a 100644 --- a/zebra-network/src/meta_addr.rs +++ b/zebra-network/src/meta_addr.rs @@ -280,6 +280,16 @@ pub enum MetaAddrChange { addr: PeerSocketAddr, }, + /// Updates an existing `MetaAddr` when we've made a successful connection with a peer. + UpdateConnected { + #[cfg_attr( + any(test, feature = "proptest-impl"), + proptest(strategy = "canonical_peer_addr_strategy()") + )] + addr: PeerSocketAddr, + services: PeerServices, + }, + /// Updates an existing `MetaAddr` when a peer responds with a message. UpdateResponded { #[cfg_attr( @@ -287,7 +297,6 @@ pub enum MetaAddrChange { proptest(strategy = "canonical_peer_addr_strategy()") )] addr: PeerSocketAddr, - services: PeerServices, }, /// Updates an existing `MetaAddr` when a peer fails. @@ -345,8 +354,8 @@ impl MetaAddr { }) } - /// Returns a [`MetaAddrChange::UpdateResponded`] for a peer that has just - /// sent us a message. + /// Returns a [`MetaAddrChange::UpdateConnected`] for a peer that has just successfully + /// connected. /// /// # Security /// @@ -354,16 +363,33 @@ impl MetaAddr { /// and the services must be the services from that peer's handshake. /// /// Otherwise: - /// - malicious peers could interfere with other peers' [`AddressBook`](crate::AddressBook) state, - /// or + /// - malicious peers could interfere with other peers' [`AddressBook`](crate::AddressBook) + /// state, or /// - Zebra could advertise unreachable addresses to its own peers. - pub fn new_responded(addr: PeerSocketAddr, services: &PeerServices) -> MetaAddrChange { - UpdateResponded { + pub fn new_connected(addr: PeerSocketAddr, services: &PeerServices) -> MetaAddrChange { + UpdateConnected { addr: canonical_peer_addr(*addr), services: *services, } } + /// Returns a [`MetaAddrChange::UpdateResponded`] for a peer that has just + /// sent us a message. + /// + /// # Security + /// + /// This address must be the remote address from an outbound connection. + /// + /// Otherwise: + /// - malicious peers could interfere with other peers' [`AddressBook`](crate::AddressBook) + /// state, or + /// - Zebra could advertise unreachable addresses to its own peers. + pub fn new_responded(addr: PeerSocketAddr) -> MetaAddrChange { + UpdateResponded { + addr: canonical_peer_addr(*addr), + } + } + /// Returns a [`MetaAddrChange::UpdateAttempt`] for a peer that we /// want to make an outbound connection to. pub fn new_reconnect(addr: PeerSocketAddr) -> MetaAddrChange { @@ -391,8 +417,7 @@ impl MetaAddr { } } - /// Returns a [`MetaAddrChange::UpdateFailed`] for a peer that has just had - /// an error. + /// Returns a [`MetaAddrChange::UpdateFailed`] for a peer that has just had an error. pub fn new_errored( addr: PeerSocketAddr, services: impl Into>, @@ -404,13 +429,10 @@ impl MetaAddr { } /// Create a new `MetaAddr` for a peer that has just shut down. - pub fn new_shutdown( - addr: PeerSocketAddr, - services: impl Into>, - ) -> MetaAddrChange { + pub fn new_shutdown(addr: PeerSocketAddr) -> MetaAddrChange { // TODO: if the peer shut down in the Responded state, preserve that // state. All other states should be treated as (timeout) errors. - MetaAddr::new_errored(addr, services.into()) + MetaAddr::new_errored(addr, None) } /// Return the address for this `MetaAddr`. @@ -696,6 +718,7 @@ impl MetaAddrChange { | NewAlternate { addr, .. } | NewLocal { addr, .. 
} | UpdateAttempt { addr } + | UpdateConnected { addr, .. } | UpdateResponded { addr, .. } | UpdateFailed { addr, .. } => *addr, } @@ -712,6 +735,7 @@ impl MetaAddrChange { | NewAlternate { addr, .. } | NewLocal { addr, .. } | UpdateAttempt { addr } + | UpdateConnected { addr, .. } | UpdateResponded { addr, .. } | UpdateFailed { addr, .. } => *addr = new_addr, } @@ -721,17 +745,18 @@ impl MetaAddrChange { pub fn untrusted_services(&self) -> Option { match self { NewInitial { .. } => None, + // TODO: split untrusted and direct services (#2324) NewGossiped { untrusted_services, .. - } => Some(*untrusted_services), - NewAlternate { + } + | NewAlternate { untrusted_services, .. } => Some(*untrusted_services), // TODO: create a "services implemented by Zebra" constant (#2324) NewLocal { .. } => Some(PeerServices::NODE_NETWORK), UpdateAttempt { .. } => None, - // TODO: split untrusted and direct services (#2324) - UpdateResponded { services, .. } => Some(*services), + UpdateConnected { services, .. } => Some(*services), + UpdateResponded { .. } => None, UpdateFailed { services, .. } => *services, } } @@ -747,9 +772,10 @@ impl MetaAddrChange { NewAlternate { .. } => None, // We know that our local listener is available NewLocal { .. } => Some(now), - UpdateAttempt { .. } => None, - UpdateResponded { .. } => None, - UpdateFailed { .. } => None, + UpdateAttempt { .. } + | UpdateConnected { .. } + | UpdateResponded { .. } + | UpdateFailed { .. } => None, } } @@ -775,33 +801,29 @@ impl MetaAddrChange { /// Return the last attempt for this change, if available. pub fn last_attempt(&self, now: Instant) -> Option { match self { - NewInitial { .. } => None, - NewGossiped { .. } => None, - NewAlternate { .. } => None, - NewLocal { .. } => None, + NewInitial { .. } | NewGossiped { .. } | NewAlternate { .. } | NewLocal { .. } => None, // Attempt changes are applied before we start the handshake to the // peer address. So the attempt time is a lower bound for the actual // handshake time. UpdateAttempt { .. } => Some(now), - UpdateResponded { .. } => None, - UpdateFailed { .. } => None, + UpdateConnected { .. } | UpdateResponded { .. } | UpdateFailed { .. } => None, } } /// Return the last response for this change, if available. pub fn last_response(&self, now: DateTime32) -> Option { match self { - NewInitial { .. } => None, - NewGossiped { .. } => None, - NewAlternate { .. } => None, - NewLocal { .. } => None, - UpdateAttempt { .. } => None, + NewInitial { .. } + | NewGossiped { .. } + | NewAlternate { .. } + | NewLocal { .. } + | UpdateAttempt { .. } => None, // If there is a large delay applying this change, then: // - the peer might stay in the `AttemptPending` state for longer, // - we might send outdated last seen times to our peers, and // - the peer will appear to be live for longer, delaying future // reconnection attempts. - UpdateResponded { .. } => Some(now), + UpdateConnected { .. } | UpdateResponded { .. } => Some(now), UpdateFailed { .. } => None, } } @@ -809,12 +831,13 @@ impl MetaAddrChange { /// Return the last failure for this change, if available. pub fn last_failure(&self, now: Instant) -> Option { match self { - NewInitial { .. } => None, - NewGossiped { .. } => None, - NewAlternate { .. } => None, - NewLocal { .. } => None, - UpdateAttempt { .. } => None, - UpdateResponded { .. } => None, + NewInitial { .. } + | NewGossiped { .. } + | NewAlternate { .. } + | NewLocal { .. } + | UpdateAttempt { .. } + | UpdateConnected { .. } + | UpdateResponded { .. 
} => None, // If there is a large delay applying this change, then: // - the peer might stay in the `AttemptPending` or `Responded` // states for longer, and @@ -833,7 +856,7 @@ impl MetaAddrChange { // local listeners get sanitized, so the state doesn't matter here NewLocal { .. } => NeverAttemptedGossiped, UpdateAttempt { .. } => AttemptPending, - UpdateResponded { .. } => Responded, + UpdateConnected { .. } | UpdateResponded { .. } => Responded, UpdateFailed { .. } => Failed, } } diff --git a/zebra-network/src/meta_addr/tests/vectors.rs b/zebra-network/src/meta_addr/tests/vectors.rs index 5b341901b18..40e00b66513 100644 --- a/zebra-network/src/meta_addr/tests/vectors.rs +++ b/zebra-network/src/meta_addr/tests/vectors.rs @@ -170,7 +170,7 @@ fn recently_responded_peer_is_gossipable() { .into_new_meta_addr(instant_now, local_now); // Create a peer that has responded - let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + let peer = MetaAddr::new_responded(address) .apply_to_meta_addr(peer_seed, instant_now, chrono_now) .expect("Failed to create MetaAddr for responded peer"); @@ -191,7 +191,7 @@ fn not_so_recently_responded_peer_is_still_gossipable() { .into_new_meta_addr(instant_now, local_now); // Create a peer that has responded - let mut peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + let mut peer = MetaAddr::new_responded(address) .apply_to_meta_addr(peer_seed, instant_now, chrono_now) .expect("Failed to create MetaAddr for responded peer"); @@ -222,7 +222,7 @@ fn responded_long_ago_peer_is_not_gossipable() { .into_new_meta_addr(instant_now, local_now); // Create a peer that has responded - let mut peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + let mut peer = MetaAddr::new_responded(address) .apply_to_meta_addr(peer_seed, instant_now, chrono_now) .expect("Failed to create MetaAddr for responded peer"); @@ -253,7 +253,7 @@ fn long_delayed_change_is_not_applied() { .into_new_meta_addr(instant_now, local_now); // Create a peer that has responded - let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + let peer = MetaAddr::new_responded(address) .apply_to_meta_addr(peer_seed, instant_now, chrono_now) .expect("Failed to create MetaAddr for responded peer"); @@ -297,7 +297,7 @@ fn later_revert_change_is_applied() { .into_new_meta_addr(instant_now, local_now); // Create a peer that has responded - let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + let peer = MetaAddr::new_responded(address) .apply_to_meta_addr(peer_seed, instant_now, chrono_now) .expect("Failed to create MetaAddr for responded peer"); @@ -340,7 +340,7 @@ fn concurrent_state_revert_change_is_not_applied() { .into_new_meta_addr(instant_now, local_now); // Create a peer that has responded - let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + let peer = MetaAddr::new_responded(address) .apply_to_meta_addr(peer_seed, instant_now, chrono_now) .expect("Failed to create MetaAddr for responded peer"); @@ -400,7 +400,7 @@ fn concurrent_state_progress_change_is_applied() { .into_new_meta_addr(instant_now, local_now); // Create a peer that has responded - let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + let peer = MetaAddr::new_responded(address) .apply_to_meta_addr(peer_seed, instant_now, chrono_now) .expect("Failed to create MetaAddr for responded peer"); diff --git a/zebra-network/src/peer/connection.rs b/zebra-network/src/peer/connection.rs index 2266085812e..f52ef565de9 
100644 --- a/zebra-network/src/peer/connection.rs +++ b/zebra-network/src/peer/connection.rs @@ -1289,11 +1289,15 @@ where // // // The inbound service must be called immediately after a buffer slot is reserved. + // + // The inbound service never times out in readiness, because the load shed layer is always + // ready, and returns an error in response to the request instead. if self.svc.ready().await.is_err() { self.fail_with(PeerError::ServiceShutdown).await; return; } + // Inbound service request timeouts are handled by the timeout layer in `start::start()`. let rsp = match self.svc.call(req.clone()).await { Err(e) => { if e.is::() { diff --git a/zebra-network/src/peer/handshake.rs b/zebra-network/src/peer/handshake.rs index e0d981951ad..60a907ab4c4 100644 --- a/zebra-network/src/peer/handshake.rs +++ b/zebra-network/src/peer/handshake.rs @@ -556,13 +556,14 @@ where } } -/// Negotiate the Zcash network protocol version with the remote peer -/// at `connected_addr`, using the connection `peer_conn`. +/// Negotiate the Zcash network protocol version with the remote peer at `connected_addr`, using +/// the connection `peer_conn`. /// -/// We split `Handshake` into its components before calling this function, -/// to avoid infectious `Sync` bounds on the returned future. +/// We split `Handshake` into its components before calling this function, to avoid infectious +/// `Sync` bounds on the returned future. /// -/// Returns the [`VersionMessage`] sent by the remote peer. +/// Returns the [`VersionMessage`] sent by the remote peer, and the [`Version`] negotiated with the +/// remote peer, inside a [`ConnectionInfo`] struct. #[allow(clippy::too_many_arguments)] pub async fn negotiate_version( peer_conn: &mut Framed, @@ -573,7 +574,7 @@ pub async fn negotiate_version( our_services: PeerServices, relay: bool, mut minimum_peer_version: MinimumPeerVersion, -) -> Result +) -> Result, HandshakeError> where PeerTransport: AsyncRead + AsyncWrite + Unpin + Send + 'static, { @@ -729,6 +730,7 @@ where // Reject connections to peers on old versions, because they might not know about all // network upgrades and could lead to chain forks or slower block propagation. let min_version = minimum_peer_version.current(); + if remote.version < min_version { debug!( remote_ip = ?their_addr, @@ -756,37 +758,44 @@ where ); // Disconnect if peer is using an obsolete version. - Err(HandshakeError::ObsoleteVersion(remote.version))?; - } else { - let negotiated_version = min(constants::CURRENT_NETWORK_PROTOCOL_VERSION, remote.version); + return Err(HandshakeError::ObsoleteVersion(remote.version)); + } - debug!( - remote_ip = ?their_addr, - ?remote.version, - ?negotiated_version, - ?min_version, - ?remote.user_agent, - "negotiated network protocol version with peer", - ); + let negotiated_version = min(constants::CURRENT_NETWORK_PROTOCOL_VERSION, remote.version); + + // Limit containing struct size, and avoid multiple duplicates of 300+ bytes of data. 
+ let connection_info = Arc::new(ConnectionInfo { + connected_addr: *connected_addr, + remote, + negotiated_version, + }); + + debug!( + remote_ip = ?their_addr, + ?connection_info.remote.version, + ?negotiated_version, + ?min_version, + ?connection_info.remote.user_agent, + "negotiated network protocol version with peer", + ); - // the value is the number of connected handshakes, by peer IP and protocol version - metrics::counter!( - "zcash.net.peers.connected", - 1, - "remote_ip" => their_addr.to_string(), - "remote_version" => remote.version.to_string(), - "negotiated_version" => negotiated_version.to_string(), - "min_version" => min_version.to_string(), - "user_agent" => remote.user_agent.clone(), - ); + // the value is the number of connected handshakes, by peer IP and protocol version + metrics::counter!( + "zcash.net.peers.connected", + 1, + "remote_ip" => their_addr.to_string(), + "remote_version" => connection_info.remote.version.to_string(), + "negotiated_version" => negotiated_version.to_string(), + "min_version" => min_version.to_string(), + "user_agent" => connection_info.remote.user_agent.clone(), + ); - // the value is the remote version of the most recent connected handshake from each peer - metrics::gauge!( - "zcash.net.peers.version.connected", - remote.version.0 as f64, - "remote_ip" => their_addr.to_string(), - ); - } + // the value is the remote version of the most recent connected handshake from each peer + metrics::gauge!( + "zcash.net.peers.version.connected", + connection_info.remote.version.0 as f64, + "remote_ip" => their_addr.to_string(), + ); peer_conn.send(Message::Verack).await?; @@ -812,7 +821,7 @@ where } } - Ok(remote) + Ok(connection_info) } /// A handshake request. @@ -894,7 +903,7 @@ where .finish(), ); - let remote = negotiate_version( + let connection_info = negotiate_version( &mut peer_conn, &connected_addr, config, @@ -906,8 +915,8 @@ where ) .await?; - let remote_canonical_addr = remote.address_from.addr(); - let remote_services = remote.services; + let remote_canonical_addr = connection_info.remote.address_from.addr(); + let remote_services = connection_info.remote.services; // If we've learned potential peer addresses from an inbound // connection or handshake, add those addresses to our address book. @@ -929,33 +938,23 @@ where let _ = address_book_updater.send(alt_addr).await; } - // The handshake succeeded: update the peer status from AttemptPending to Responded + // The handshake succeeded: update the peer status from AttemptPending to Responded, + // and send initial connection info. if let Some(book_addr) = connected_addr.get_address_book_addr() { // the collector doesn't depend on network activity, // so this await should not hang let _ = address_book_updater - .send(MetaAddr::new_responded(book_addr, &remote_services)) + .send(MetaAddr::new_connected(book_addr, &remote_services)) .await; } - // Set the connection's version to the minimum of the received version or our own. - let negotiated_version = - std::cmp::min(remote.version, constants::CURRENT_NETWORK_PROTOCOL_VERSION); - - // Limit containing struct size, and avoid multiple duplicates of 300+ bytes of data. - let connection_info = Arc::new(ConnectionInfo { - connected_addr, - remote, - negotiated_version, - }); - // Reconfigure the codec to use the negotiated version. // // TODO: The tokio documentation says not to do this while any frames are still being processed. 
// Since we don't know that here, another way might be to release the tcp // stream from the unversioned Framed wrapper and construct a new one with a versioned codec. let bare_codec = peer_conn.codec_mut(); - bare_codec.reconfigure_version(negotiated_version); + bare_codec.reconfigure_version(connection_info.negotiated_version); debug!("constructing client, spawning server"); @@ -1075,7 +1074,6 @@ where let heartbeat_task = tokio::spawn( send_periodic_heartbeats_with_shutdown_handle( connected_addr, - remote_services, shutdown_rx, server_tx.clone(), address_book_updater.clone(), @@ -1213,7 +1211,6 @@ pub(crate) async fn register_inventory_status( /// Returning from this function terminates the connection's heartbeat task. async fn send_periodic_heartbeats_with_shutdown_handle( connected_addr: ConnectedAddr, - remote_services: PeerServices, shutdown_rx: oneshot::Receiver, server_tx: futures::channel::mpsc::Sender, heartbeat_ts_collector: tokio::sync::mpsc::Sender, @@ -1222,7 +1219,6 @@ async fn send_periodic_heartbeats_with_shutdown_handle( let heartbeat_run_loop = send_periodic_heartbeats_run_loop( connected_addr, - remote_services, server_tx, heartbeat_ts_collector.clone(), ); @@ -1246,7 +1242,6 @@ async fn send_periodic_heartbeats_with_shutdown_handle( PeerError::ClientCancelledHeartbeatTask, &heartbeat_ts_collector, &connected_addr, - &remote_services, ) .await } @@ -1256,7 +1251,6 @@ async fn send_periodic_heartbeats_with_shutdown_handle( PeerError::ClientDropped, &heartbeat_ts_collector, &connected_addr, - &remote_services, ) .await } @@ -1275,7 +1269,6 @@ async fn send_periodic_heartbeats_with_shutdown_handle( /// See `send_periodic_heartbeats_with_shutdown_handle` for details. async fn send_periodic_heartbeats_run_loop( connected_addr: ConnectedAddr, - remote_services: PeerServices, mut server_tx: futures::channel::mpsc::Sender, heartbeat_ts_collector: tokio::sync::mpsc::Sender, ) -> Result<(), BoxError> { @@ -1294,13 +1287,7 @@ async fn send_periodic_heartbeats_run_loop( // We've reached another heartbeat interval without // shutting down, so do a heartbeat request. let heartbeat = send_one_heartbeat(&mut server_tx); - heartbeat_timeout( - heartbeat, - &heartbeat_ts_collector, - &connected_addr, - &remote_services, - ) - .await?; + heartbeat_timeout(heartbeat, &heartbeat_ts_collector, &connected_addr).await?; // # Security // @@ -1312,7 +1299,7 @@ async fn send_periodic_heartbeats_run_loop( // the collector doesn't depend on network activity, // so this await should not hang let _ = heartbeat_ts_collector - .send(MetaAddr::new_responded(book_addr, &remote_services)) + .send(MetaAddr::new_responded(book_addr)) .await; } } @@ -1375,29 +1362,16 @@ async fn heartbeat_timeout( fut: F, address_book_updater: &tokio::sync::mpsc::Sender, connected_addr: &ConnectedAddr, - remote_services: &PeerServices, ) -> Result where F: Future>, { let t = match timeout(constants::HEARTBEAT_INTERVAL, fut).await { Ok(inner_result) => { - handle_heartbeat_error( - inner_result, - address_book_updater, - connected_addr, - remote_services, - ) - .await? + handle_heartbeat_error(inner_result, address_book_updater, connected_addr).await? } Err(elapsed) => { - handle_heartbeat_error( - Err(elapsed), - address_book_updater, - connected_addr, - remote_services, - ) - .await? + handle_heartbeat_error(Err(elapsed), address_book_updater, connected_addr).await? 
} }; @@ -1409,7 +1383,6 @@ async fn handle_heartbeat_error( result: Result, address_book_updater: &tokio::sync::mpsc::Sender, connected_addr: &ConnectedAddr, - remote_services: &PeerServices, ) -> Result where E: std::fmt::Debug, @@ -1427,7 +1400,7 @@ where // - after the first error or shutdown, the peer is disconnected if let Some(book_addr) = connected_addr.get_address_book_addr() { let _ = address_book_updater - .send(MetaAddr::new_errored(book_addr, *remote_services)) + .send(MetaAddr::new_errored(book_addr, None)) .await; } Err(err) @@ -1440,13 +1413,12 @@ async fn handle_heartbeat_shutdown( peer_error: PeerError, address_book_updater: &tokio::sync::mpsc::Sender, connected_addr: &ConnectedAddr, - remote_services: &PeerServices, ) -> Result<(), BoxError> { tracing::debug!(?peer_error, "client shutdown, shutting down heartbeat"); if let Some(book_addr) = connected_addr.get_address_book_addr() { let _ = address_book_updater - .send(MetaAddr::new_shutdown(book_addr, *remote_services)) + .send(MetaAddr::new_shutdown(book_addr)) .await; } diff --git a/zebra-network/src/peer_set/candidate_set.rs b/zebra-network/src/peer_set/candidate_set.rs index f951bda5b9b..b60dc74732f 100644 --- a/zebra-network/src/peer_set/candidate_set.rs +++ b/zebra-network/src/peer_set/candidate_set.rs @@ -414,6 +414,8 @@ where } /// Returns the address book for this `CandidateSet`. + #[cfg(any(test, feature = "proptest-impl"))] + #[allow(dead_code)] pub async fn address_book(&self) -> Arc> { self.address_book.clone() } diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 9dc74d99f6c..30404ac6aea 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -29,7 +29,6 @@ use tokio_stream::wrappers::IntervalStream; use tower::{ buffer::Buffer, discover::Change, layer::Layer, util::BoxService, Service, ServiceExt, }; -use tracing::Span; use tracing_futures::Instrument; use zebra_chain::{chain_tip::ChainTip, diagnostic::task::WaitForPanics}; @@ -197,7 +196,7 @@ where config.clone(), outbound_connector.clone(), peerset_tx.clone(), - address_book_updater, + address_book_updater.clone(), ); let initial_peers_join = tokio::spawn(initial_peers_fut.in_current_span()); @@ -242,6 +241,7 @@ where outbound_connector, peerset_tx, active_outbound_connections, + address_book_updater, ); let crawl_guard = tokio::spawn(crawl_fut.in_current_span()); @@ -740,6 +740,7 @@ enum CrawlerAction { /// /// Uses `active_outbound_connections` to limit the number of active outbound connections /// across both the initial peers and crawler. The limit is based on `config`. +#[allow(clippy::too_many_arguments)] #[instrument( skip( config, @@ -749,6 +750,7 @@ enum CrawlerAction { outbound_connector, peerset_tx, active_outbound_connections, + address_book_updater, ), fields( new_peer_interval = ?config.crawl_new_peer_interval, @@ -762,6 +764,7 @@ async fn crawl_and_dial( outbound_connector: C, peerset_tx: futures::channel::mpsc::Sender, mut active_outbound_connections: ActiveConnectionCounter, + address_book_updater: tokio::sync::mpsc::Sender, ) -> Result<(), BoxError> where C: Service< @@ -783,8 +786,6 @@ where "starting the peer address crawler", ); - let address_book = candidates.address_book().await; - // # Concurrency // // Allow tasks using the candidate set to be spawned, so they can run concurrently. 
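The heartbeat and crawler changes around this point replace direct `AddressBook` locking with fire-and-forget sends on the address book updater channel. The sketch below shows that pattern in a self-contained form, using plain `tokio` primitives (with the `macros`, `rt-multi-thread`, and `sync` features) and a hypothetical `Change` type rather than Zebra's real `MetaAddrChange` API; it is illustrative only.

```rust
// Sketch of the channel-based updater pattern: one task owns the state,
// everyone else sends changes, so callers never block on a mutex.
use std::net::SocketAddr;
use tokio::sync::mpsc;

// Hypothetical stand-in for a `MetaAddrChange`-style update.
#[derive(Debug)]
enum Change {
    Errored { addr: SocketAddr },
}

#[tokio::main]
async fn main() {
    // Bounded channel, like the updater channel sized earlier in this diff.
    let (tx, mut rx) = mpsc::channel::<Change>(10);

    // A single updater task owns the "address book" state.
    let updater = tokio::spawn(async move {
        let mut book: Vec<Change> = Vec::new();
        while let Some(change) = rx.recv().await {
            // Apply the change to the owned state.
            book.push(change);
        }
        println!("applied changes: {book:?}");
        book.len()
    });

    // A connection task reports a failure as a plain send; send errors are
    // ignored, as they are on Zebra shutdown.
    let addr: SocketAddr = "127.0.0.1:8233".parse().expect("valid address");
    let _ = tx.send(Change::Errored { addr }).await;
    drop(tx);

    assert_eq!(updater.await.expect("updater task finished"), 1);
}
```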
@@ -857,7 +858,7 @@ where let candidates = candidates.clone(); let outbound_connector = outbound_connector.clone(); let peerset_tx = peerset_tx.clone(); - let address_book = address_book.clone(); + let address_book_updater = address_book_updater.clone(); let demand_tx = demand_tx.clone(); // Increment the connection count before we spawn the connection. @@ -886,7 +887,7 @@ where outbound_connector, outbound_connection_tracker, peerset_tx, - address_book, + address_book_updater, demand_tx, ) .await?; @@ -1020,7 +1021,7 @@ where outbound_connector, outbound_connection_tracker, peerset_tx, - address_book, + address_book_updater, demand_tx ))] async fn dial( @@ -1028,7 +1029,7 @@ async fn dial( mut outbound_connector: C, outbound_connection_tracker: ConnectionTracker, mut peerset_tx: futures::channel::mpsc::Sender, - address_book: Arc>, + address_book_updater: tokio::sync::mpsc::Sender, mut demand_tx: futures::channel::mpsc::Sender, ) -> Result<(), BoxError> where @@ -1069,8 +1070,8 @@ where } // The connection was never opened, or it failed the handshake and was dropped. Err(error) => { - debug!(?error, ?candidate.addr, "failed to make outbound connection to peer"); - report_failed(address_book.clone(), candidate).await; + info!(?error, ?candidate.addr, "failed to make outbound connection to peer"); + report_failed(address_book_updater.clone(), candidate).await; // The demand signal that was taken out of the queue to attempt to connect to the // failed candidate never turned into a connection, so add it back. @@ -1090,25 +1091,15 @@ where Ok(()) } -/// Mark `addr` as a failed peer in `address_book`. -#[instrument(skip(address_book))] -async fn report_failed(address_book: Arc>, addr: MetaAddr) { - let addr = MetaAddr::new_errored(addr.addr, addr.services); +/// Mark `addr` as a failed peer to `address_book_updater`. +#[instrument(skip(address_book_updater))] +async fn report_failed( + address_book_updater: tokio::sync::mpsc::Sender, + addr: MetaAddr, +) { + // The connection info is the same as what's already in the address book. + let addr = MetaAddr::new_errored(addr.addr, None); - // # Correctness - // - // Spawn address book accesses on a blocking thread, to avoid deadlocks (see #1976). - let span = Span::current(); - let updated_addr = tokio::task::spawn_blocking(move || { - span.in_scope(|| address_book.lock().unwrap().update(addr)) - }) - .wait_for_panics() - .await; - - assert_eq!( - updated_addr.map(|addr| addr.addr()), - Some(addr.addr()), - "incorrect address updated by address book: \ - original: {addr:?}, updated: {updated_addr:?}" - ); + // Ignore send errors on Zebra shutdown. + let _ = address_book_updater.send(addr).await; } diff --git a/zebra-network/src/peer_set/initialize/tests/vectors.rs b/zebra-network/src/peer_set/initialize/tests/vectors.rs index c871ab43227..59bafdc771e 100644 --- a/zebra-network/src/peer_set/initialize/tests/vectors.rs +++ b/zebra-network/src/peer_set/initialize/tests/vectors.rs @@ -24,7 +24,6 @@ use futures::{channel::mpsc, FutureExt, StreamExt}; use indexmap::IndexSet; use tokio::{io::AsyncWriteExt, net::TcpStream, task::JoinHandle}; use tower::{service_fn, Layer, Service, ServiceExt}; -use tracing::Span; use zebra_chain::{chain_tip::NoChainTip, parameters::Network, serialization::DateTime32}; use zebra_test::net::random_known_port; @@ -1517,13 +1516,8 @@ where config.peerset_initial_target_size = peerset_initial_target_size; } - // Manually initialize an address book without a timestamp tracker. 
- let mut address_book = AddressBook::new( - config.listen_addr, - config.network, - config.max_connections_per_ip, - Span::current(), - ); + let (address_book, address_book_updater, _address_metrics, _address_book_updater_guard) = + AddressBookUpdater::spawn(&config, config.listen_addr); // Add enough fake peers to go over the limit, even if the limit is zero. let over_limit_peers = config.peerset_outbound_connection_limit() * 2 + 1; @@ -1540,7 +1534,10 @@ where .new_gossiped_change() .expect("created MetaAddr contains enough information to represent a gossiped address"); - address_book.update(addr); + address_book + .lock() + .expect("panic in previous thread while accessing the address book") + .update(addr); } // Create a fake peer set. @@ -1555,8 +1552,6 @@ where Ok(rsp) }); - let address_book = Arc::new(std::sync::Mutex::new(address_book)); - // Make the channels large enough to hold all the peers. let (peerset_tx, peerset_rx) = mpsc::channel::(over_limit_peers); let (mut demand_tx, demand_rx) = mpsc::channel::(over_limit_peers); @@ -1581,6 +1576,7 @@ where outbound_connector, peerset_tx, active_outbound_connections, + address_book_updater, ); let crawl_task_handle = tokio::spawn(crawl_fut); diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index d6372f08963..d0b8dcb9748 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-node-services" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" authors = ["Zcash Foundation "] description = "The interfaces of some Zebra node services" license = "MIT OR Apache-2.0" @@ -35,7 +35,7 @@ rpc-client = [ ] [dependencies] -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.30" } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.31" } # Optional dependencies diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 9001552832a..3869e0a238b 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-rpc" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" authors = ["Zcash Foundation "] description = "A Zebra JSON Remote Procedure Call (JSON-RPC) interface" license = "MIT OR Apache-2.0" @@ -70,12 +70,12 @@ zcash_address = { version = "0.3.0", optional = true } # Test-only feature proptest-impl proptest = { version = "1.3.1", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30", features = ["json-conversion"] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.30" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.30" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.30" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.30" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.30" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.31", features = ["json-conversion"] } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.31" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.31" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.31" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.31" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.31" } [dev-dependencies] insta = { version = "1.33.0", features = ["redactions", "json", "ron"] } diff --git a/zebra-rpc/src/config.rs b/zebra-rpc/src/config.rs index 10a0ff4dfff..b5b4569f74a 100644 --- 
a/zebra-rpc/src/config.rs +++ b/zebra-rpc/src/config.rs @@ -34,7 +34,7 @@ pub struct Config { /// /// Zebra's RPC server has a separate thread pool and a `tokio` executor for each thread. /// State queries are run concurrently using the shared thread pool controlled by - /// the [`SyncSection.parallel_cpu_threads`](https://doc.zebra.zfnd.org/zebrad/config/struct.SyncSection.html#structfield.parallel_cpu_threads) config. + /// the [`SyncSection.parallel_cpu_threads`](https://docs.rs/zebrad/latest/zebrad/components/sync/struct.Config.html#structfield.parallel_cpu_threads) config. /// /// We recommend setting both configs to `0` (automatic scaling) for the best performance. /// This uses one thread per available CPU core. diff --git a/zebra-rpc/src/lib.rs b/zebra-rpc/src/lib.rs index 2f5dae4a097..81a30d78e0c 100644 --- a/zebra-rpc/src/lib.rs +++ b/zebra-rpc/src/lib.rs @@ -2,7 +2,7 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_rpc")] +#![doc(html_root_url = "https://docs.rs/zebra_rpc")] pub mod config; pub mod constants; diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index 469f132a452..ae8df1d4667 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -34,6 +34,6 @@ ff = "0.13.0" group = "0.13.0" tokio = { version = "1.33.0", features = ["test-util"] } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.30" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30" } -zebra-test = { path = "../zebra-test", version = "1.0.0-beta.30" } +zebra-state = { path = "../zebra-state" } +zebra-chain = { path = "../zebra-chain" } +zebra-test = { path = "../zebra-test" } diff --git a/zebra-scan/src/lib.rs b/zebra-scan/src/lib.rs index 0feff7cab5a..47a569e8ea3 100644 --- a/zebra-scan/src/lib.rs +++ b/zebra-scan/src/lib.rs @@ -2,7 +2,7 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_state")] +#![doc(html_root_url = "https://docs.rs/zebra_scan")] #[cfg(test)] mod tests; diff --git a/zebra-scan/src/tests.rs b/zebra-scan/src/tests.rs index 5c7c8df857e..ec792f44560 100644 --- a/zebra-scan/src/tests.rs +++ b/zebra-scan/src/tests.rs @@ -72,21 +72,23 @@ async fn scanning_from_populated_zebra_state() -> Result<()> { let mut transactions_found = 0; let mut transactions_scanned = 0; let mut blocks_scanned = 0; + // TODO: Accessing the state database directly is ok in the tests, but not in production code. + // Use `Request::Block` if the code is copied to production. while let Some(block) = db.block(height.into()) { // We fake the sapling tree size to 1 because we are not in Sapling heights. 
let sapling_tree_size = 1; let orchard_tree_size = db .orchard_tree_by_hash_or_height(height.into()) -            .expect("should exist") +            .expect("each state block must have an orchard tree") .count(); let chain_metadata = ChainMetadata { sapling_commitment_tree_size: sapling_tree_size .try_into() -                .expect("position should fit in u32"), +                .expect("sapling position is limited to u32::MAX"), orchard_commitment_tree_size: orchard_tree_size .try_into() -                .expect("position should fit in u32"), +                .expect("orchard position is limited to u32::MAX"), }; let compact_block = block_to_compact(block, chain_metadata); diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index e337ab6e641..cb60d7ec10b 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-script" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" authors = ["Zcash Foundation <zebra@zfnd.org>"] description = "Zebra script verification wrapping zcashd's zcash_script library" license = "MIT OR Apache-2.0" @@ -17,7 +17,7 @@ categories = ["api-bindings", "cryptography::cryptocurrencies"] [dependencies] zcash_script = "0.1.14" -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.31" } thiserror = "1.0.48" displaydoc = "0.2.4" diff --git a/zebra-script/src/lib.rs b/zebra-script/src/lib.rs index 24f55025bdf..1d0e88756f4 100644 --- a/zebra-script/src/lib.rs +++ b/zebra-script/src/lib.rs @@ -1,7 +1,7 @@ //! Zebra script verification wrapping zcashd's zcash_script library #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_script")] +#![doc(html_root_url = "https://docs.rs/zebra_script")] // We allow unsafe code, so we can call zcash_script #![allow(unsafe_code)] diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 8ce11d76931..637ecfe5215 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-state" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" authors = ["Zcash Foundation <zebra@zfnd.org>"] description = "State contextual verification and storage code for Zebra" license = "MIT OR Apache-2.0" @@ -73,13 +73,13 @@ tracing = "0.1.39" elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } serde_json = { version = "1.0.108", package = "serde_json", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.31", features = ["async-error"] } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } # test feature proptest-impl -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.30", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.31", optional = true } proptest = { version = "1.3.1", optional = true } proptest-derive = { version = "0.4.0", optional = true } diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs index e01dfb734cf..2855483406b 100644 --- a/zebra-state/src/lib.rs +++ b/zebra-state/src/lib.rs @@ -10,7 +10,7 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url =
"https://doc.zebra.zfnd.org/zebra_state")] +#![doc(html_root_url = "https://docs.rs/zebra_state")] // // Rust 1.72 has a false positive when nested generics are used inside Arc. // This makes the `arc_with_non_send_sync` lint trigger on a lot of proptest code. diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index f2513119e2d..0db06735ca7 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -224,6 +224,9 @@ pub struct ContextuallyVerifiedBlock { } /// Wraps note commitment trees and the history tree together. +/// +/// The default instance represents the treestate that corresponds to the genesis block. +#[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct Treestate { /// Note commitment trees. pub note_commitment_trees: NoteCommitmentTrees, @@ -253,20 +256,6 @@ impl Treestate { } } -/// Contains a block ready to be committed together with its associated -/// treestate. -/// -/// Zebra's non-finalized state passes this `struct` over to the finalized state -/// when committing a block. The associated treestate is passed so that the -/// finalized state does not have to retrieve the previous treestate from the -/// database and recompute a new one. -pub struct SemanticallyVerifiedBlockWithTrees { - /// A block ready to be committed. - pub verified: SemanticallyVerifiedBlock, - /// The tresstate associated with the block. - pub treestate: Treestate, -} - /// Contains a block ready to be committed. /// /// Zebra's state service passes this `enum` over to the finalized state @@ -281,6 +270,54 @@ pub enum FinalizableBlock { }, } +/// Contains a block with all its associated data that the finalized state can commit to its +/// database. +/// +/// Note that it's the constructor's responsibility to ensure that all data is valid and verified. +pub struct FinalizedBlock { + /// The block to commit to the state. + pub(super) block: Arc, + /// The hash of the block. + pub(super) hash: block::Hash, + /// The height of the block. + pub(super) height: block::Height, + /// New transparent outputs created in this block, indexed by + /// [`OutPoint`](transparent::OutPoint). + pub(super) new_outputs: HashMap, + /// A precomputed list of the hashes of the transactions in this block, in the same order as + /// `block.transactions`. + pub(super) transaction_hashes: Arc<[transaction::Hash]>, + /// The tresstate associated with the block. + pub(super) treestate: Treestate, +} + +impl FinalizedBlock { + /// Constructs [`FinalizedBlock`] from [`CheckpointVerifiedBlock`] and its [`Treestate`]. + pub fn from_checkpoint_verified(block: CheckpointVerifiedBlock, treestate: Treestate) -> Self { + Self::from_semantically_verified(SemanticallyVerifiedBlock::from(block), treestate) + } + + /// Constructs [`FinalizedBlock`] from [`ContextuallyVerifiedBlock`] and its [`Treestate`]. + pub fn from_contextually_verified( + block: ContextuallyVerifiedBlock, + treestate: Treestate, + ) -> Self { + Self::from_semantically_verified(SemanticallyVerifiedBlock::from(block), treestate) + } + + /// Constructs [`FinalizedBlock`] from [`SemanticallyVerifiedBlock`] and its [`Treestate`]. + fn from_semantically_verified(block: SemanticallyVerifiedBlock, treestate: Treestate) -> Self { + Self { + block: block.block, + hash: block.hash, + height: block.height, + new_outputs: block.new_outputs, + transaction_hashes: block.transaction_hashes, + treestate, + } + } +} + impl FinalizableBlock { /// Create a new [`FinalizableBlock`] given a [`ContextuallyVerifiedBlock`]. 
pub fn new(contextually_verified: ContextuallyVerifiedBlock, treestate: Treestate) -> Self { diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index fde5c414c28..55012649e4d 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -23,7 +23,7 @@ use std::{ use zebra_chain::{block, parallel::tree::NoteCommitmentTrees, parameters::Network}; use crate::{ - request::{FinalizableBlock, SemanticallyVerifiedBlockWithTrees, Treestate}, + request::{FinalizableBlock, FinalizedBlock, Treestate}, service::{check, QueuedCheckpointVerified}, BoxError, CheckpointVerifiedBlock, CloneError, Config, }; @@ -38,7 +38,10 @@ mod arbitrary; #[cfg(test)] mod tests; -pub use disk_format::{OutputIndex, OutputLocation, TransactionLocation, MAX_ON_DISK_HEIGHT}; +pub use disk_format::{OutputIndex, OutputLocation, TransactionLocation}; + +#[cfg(any(test, feature = "proptest-impl"))] +pub use disk_format::MAX_ON_DISK_HEIGHT; pub(super) use zebra_db::ZebraDb; @@ -302,17 +305,15 @@ impl FinalizedState { let sapling_root = note_commitment_trees.sapling.root(); let orchard_root = note_commitment_trees.orchard.root(); history_tree_mut.push(self.network(), block.clone(), sapling_root, orchard_root)?; + let treestate = Treestate { + note_commitment_trees, + history_tree, + }; ( checkpoint_verified.height, checkpoint_verified.hash, - SemanticallyVerifiedBlockWithTrees { - verified: checkpoint_verified.0, - treestate: Treestate { - note_commitment_trees, - history_tree, - }, - }, + FinalizedBlock::from_checkpoint_verified(checkpoint_verified, treestate), Some(prev_note_commitment_trees), ) } @@ -322,10 +323,7 @@ impl FinalizedState { } => ( contextually_verified.height, contextually_verified.hash, - SemanticallyVerifiedBlockWithTrees { - verified: contextually_verified.into(), - treestate, - }, + FinalizedBlock::from_contextually_verified(contextually_verified, treestate), prev_note_commitment_trees, ), }; @@ -336,7 +334,7 @@ impl FinalizedState { // Assert that callers (including unit tests) get the chain order correct if self.db.is_empty() { assert_eq!( - committed_tip_hash, finalized.verified.block.header.previous_block_hash, + committed_tip_hash, finalized.block.header.previous_block_hash, "the first block added to an empty state must be a genesis block, source: {source}", ); assert_eq!( @@ -352,13 +350,13 @@ impl FinalizedState { ); assert_eq!( - committed_tip_hash, finalized.verified.block.header.previous_block_hash, + committed_tip_hash, finalized.block.header.previous_block_hash, "committed block must be a child of the finalized tip, source: {source}", ); } #[cfg(feature = "elasticsearch")] - let finalized_block = finalized.verified.block.clone(); + let finalized_inner_block = finalized.block.clone(); let note_commitment_trees = finalized.treestate.note_commitment_trees.clone(); let result = @@ -368,7 +366,7 @@ impl FinalizedState { if result.is_ok() { // Save blocks to elasticsearch if the feature is enabled. 
#[cfg(feature = "elasticsearch")] - self.elasticsearch(&finalized_block); + self.elasticsearch(&finalized_inner_block); // TODO: move the stop height check to the syncer (#3442) if self.is_at_stop_height(height) { diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index 0a1a49e72cb..7d4d4006405 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -30,7 +30,7 @@ use zebra_chain::{ }; use crate::{ - request::SemanticallyVerifiedBlockWithTrees, + request::FinalizedBlock, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, disk_format::{ @@ -39,7 +39,7 @@ use crate::{ }, zebra_db::{metrics::block_precommit_metrics, ZebraDb}, }, - BoxError, HashOrHeight, SemanticallyVerifiedBlock, + BoxError, HashOrHeight, }; #[cfg(test)] @@ -292,13 +292,12 @@ impl ZebraDb { /// - Propagates any errors from updating history and note commitment trees pub(in super::super) fn write_block( &mut self, - finalized: SemanticallyVerifiedBlockWithTrees, + finalized: FinalizedBlock, prev_note_commitment_trees: Option, network: Network, source: &str, ) -> Result { let tx_hash_indexes: HashMap = finalized - .verified .transaction_hashes .iter() .enumerate() @@ -311,12 +310,11 @@ impl ZebraDb { // simplify the spent_utxos location lookup code, // and remove the extra new_outputs_by_out_loc argument let new_outputs_by_out_loc: BTreeMap = finalized - .verified .new_outputs .iter() .map(|(outpoint, ordered_utxo)| { ( - lookup_out_loc(finalized.verified.height, outpoint, &tx_hash_indexes), + lookup_out_loc(finalized.height, outpoint, &tx_hash_indexes), ordered_utxo.utxo.clone(), ) }) @@ -325,7 +323,6 @@ impl ZebraDb { // Get a list of the spent UTXOs, before we delete any from the database let spent_utxos: Vec<(transparent::OutPoint, OutputLocation, transparent::Utxo)> = finalized - .verified .block .transactions .iter() @@ -337,13 +334,12 @@ impl ZebraDb { // Some utxos are spent in the same block, so they will be in // `tx_hash_indexes` and `new_outputs` self.output_location(&outpoint).unwrap_or_else(|| { - lookup_out_loc(finalized.verified.height, &outpoint, &tx_hash_indexes) + lookup_out_loc(finalized.height, &outpoint, &tx_hash_indexes) }), self.utxo(&outpoint) .map(|ordered_utxo| ordered_utxo.utxo) .or_else(|| { finalized - .verified .new_outputs .get(&outpoint) .map(|ordered_utxo| ordered_utxo.utxo.clone()) @@ -368,7 +364,6 @@ impl ZebraDb { .values() .chain( finalized - .verified .new_outputs .values() .map(|ordered_utxo| &ordered_utxo.utxo), @@ -403,7 +398,7 @@ impl ZebraDb { tracing::trace!(?source, "committed block from"); - Ok(finalized.verified.hash) + Ok(finalized.hash) } /// Writes the given batch to the database. @@ -446,7 +441,7 @@ impl DiskWriteBatch { &mut self, zebra_db: &ZebraDb, network: Network, - finalized: &SemanticallyVerifiedBlockWithTrees, + finalized: &FinalizedBlock, new_outputs_by_out_loc: BTreeMap, spent_utxos_by_outpoint: HashMap, spent_utxos_by_out_loc: BTreeMap, @@ -456,7 +451,7 @@ impl DiskWriteBatch { ) -> Result<(), BoxError> { let db = &zebra_db.db; // Commit block, transaction, and note commitment tree data. 
- self.prepare_block_header_and_transaction_data_batch(db, &finalized.verified)?; + self.prepare_block_header_and_transaction_data_batch(db, finalized)?; // The consensus rules are silent on shielded transactions in the genesis block, // because there aren't any in the mainnet or testnet genesis blocks. @@ -464,7 +459,7 @@ impl DiskWriteBatch { // which is already present from height 1 to the first shielded transaction. // // In Zebra we include the nullifiers and note commitments in the genesis block because it simplifies our code. - self.prepare_shielded_transaction_batch(db, &finalized.verified)?; + self.prepare_shielded_transaction_batch(db, finalized)?; self.prepare_trees_batch(zebra_db, finalized, prev_note_commitment_trees)?; // # Consensus @@ -477,12 +472,12 @@ impl DiskWriteBatch { // So we ignore the genesis UTXO, transparent address index, and value pool updates // for the genesis block. This also ignores genesis shielded value pool updates, but there // aren't any of those on mainnet or testnet. - if !finalized.verified.height.is_min() { + if !finalized.height.is_min() { // Commit transaction indexes self.prepare_transparent_transaction_batch( db, network, - &finalized.verified, + finalized, &new_outputs_by_out_loc, &spent_utxos_by_outpoint, &spent_utxos_by_out_loc, @@ -492,18 +487,14 @@ impl DiskWriteBatch { // Commit UTXOs and value pools self.prepare_chain_value_pools_batch( db, - &finalized.verified, + finalized, spent_utxos_by_outpoint, value_pool, )?; } // The block has passed contextual validation, so update the metrics - block_precommit_metrics( - &finalized.verified.block, - finalized.verified.hash, - finalized.verified.height, - ); + block_precommit_metrics(&finalized.block, finalized.hash, finalized.height); Ok(()) } @@ -518,7 +509,7 @@ impl DiskWriteBatch { pub fn prepare_block_header_and_transaction_data_batch( &mut self, db: &DiskDb, - finalized: &SemanticallyVerifiedBlock, + finalized: &FinalizedBlock, ) -> Result<(), BoxError> { // Blocks let block_header_by_height = db.cf_handle("block_header_by_height").unwrap(); @@ -530,7 +521,7 @@ impl DiskWriteBatch { let hash_by_tx_loc = db.cf_handle("hash_by_tx_loc").unwrap(); let tx_loc_by_hash = db.cf_handle("tx_loc_by_hash").unwrap(); - let SemanticallyVerifiedBlock { + let FinalizedBlock { block, hash, height, diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs b/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs index 93db70f19a1..639ad7e5461 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs @@ -26,6 +26,7 @@ use zebra_chain::{ use zebra_test::vectors::{MAINNET_BLOCKS, TESTNET_BLOCKS}; use crate::{ + request::{FinalizedBlock, Treestate}, service::finalized_state::{disk_db::DiskWriteBatch, ZebraDb}, CheckpointVerifiedBlock, Config, }; @@ -108,7 +109,7 @@ fn test_block_db_round_trip_with( // Now, use the database let original_block = Arc::new(original_block); - let finalized = if original_block.coinbase_height().is_some() { + let checkpoint_verified = if original_block.coinbase_height().is_some() { original_block.clone().into() } else { // Fake a zero height @@ -119,6 +120,10 @@ fn test_block_db_round_trip_with( ) }; + let dummy_treestate = Treestate::default(); + let finalized = + FinalizedBlock::from_checkpoint_verified(checkpoint_verified, dummy_treestate); + // Skip validation by writing the block directly to the database let mut batch = 
DiskWriteBatch::new(); batch diff --git a/zebra-state/src/service/finalized_state/zebra_db/chain.rs b/zebra-state/src/service/finalized_state/zebra_db/chain.rs index 925762ce476..706950c5bee 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/chain.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/chain.rs @@ -19,12 +19,13 @@ use zebra_chain::{ }; use crate::{ + request::FinalizedBlock, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, disk_format::RawBytes, zebra_db::ZebraDb, }, - BoxError, SemanticallyVerifiedBlock, + BoxError, }; impl ZebraDb { @@ -128,13 +129,13 @@ impl DiskWriteBatch { pub fn prepare_chain_value_pools_batch( &mut self, db: &DiskDb, - finalized: &SemanticallyVerifiedBlock, + finalized: &FinalizedBlock, utxos_spent_by_block: HashMap, value_pool: ValueBalance, ) -> Result<(), BoxError> { let tip_chain_value_pool = db.cf_handle("tip_chain_value_pool").unwrap(); - let SemanticallyVerifiedBlock { block, .. } = finalized; + let FinalizedBlock { block, .. } = finalized; let new_pool = value_pool.add_block(block.borrow(), &utxos_spent_by_block)?; self.zs_insert(&tip_chain_value_pool, (), new_pool); diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs index a6733afa456..7ceaa6c453e 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs @@ -27,13 +27,13 @@ use zebra_chain::{ }; use crate::{ - request::SemanticallyVerifiedBlockWithTrees, + request::{FinalizedBlock, Treestate}, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, disk_format::RawBytes, zebra_db::ZebraDb, }, - BoxError, SemanticallyVerifiedBlock, + BoxError, }; // Doc-only items @@ -436,9 +436,9 @@ impl DiskWriteBatch { pub fn prepare_shielded_transaction_batch( &mut self, db: &DiskDb, - finalized: &SemanticallyVerifiedBlock, + finalized: &FinalizedBlock, ) -> Result<(), BoxError> { - let SemanticallyVerifiedBlock { block, .. } = finalized; + let FinalizedBlock { block, .. } = finalized; // Index each transaction's shielded data for transaction in &block.transactions { @@ -491,11 +491,18 @@ impl DiskWriteBatch { pub fn prepare_trees_batch( &mut self, zebra_db: &ZebraDb, - finalized: &SemanticallyVerifiedBlockWithTrees, + finalized: &FinalizedBlock, prev_note_commitment_trees: Option, ) -> Result<(), BoxError> { - let height = finalized.verified.height; - let trees = finalized.treestate.note_commitment_trees.clone(); + let FinalizedBlock { + height, + treestate: + Treestate { + note_commitment_trees, + history_tree, + }, + .. 
+ } = finalized; let prev_sprout_tree = prev_note_commitment_trees.as_ref().map_or_else( || zebra_db.sprout_tree_for_tip(), @@ -511,29 +518,29 @@ ); // Update the Sprout tree and store its anchor only if it has changed - if height.is_min() || prev_sprout_tree != trees.sprout { - self.update_sprout_tree(zebra_db, &trees.sprout) + if height.is_min() || prev_sprout_tree != note_commitment_trees.sprout { + self.update_sprout_tree(zebra_db, &note_commitment_trees.sprout) } // Store the Sapling tree, anchor, and any new subtrees only if they have changed - if height.is_min() || prev_sapling_tree != trees.sapling { - self.create_sapling_tree(zebra_db, &height, &trees.sapling); + if height.is_min() || prev_sapling_tree != note_commitment_trees.sapling { + self.create_sapling_tree(zebra_db, height, &note_commitment_trees.sapling); - if let Some(subtree) = trees.sapling_subtree { + if let Some(subtree) = note_commitment_trees.sapling_subtree { self.insert_sapling_subtree(zebra_db, &subtree); } } // Store the Orchard tree, anchor, and any new subtrees only if they have changed - if height.is_min() || prev_orchard_tree != trees.orchard { - self.create_orchard_tree(zebra_db, &height, &trees.orchard); + if height.is_min() || prev_orchard_tree != note_commitment_trees.orchard { + self.create_orchard_tree(zebra_db, height, &note_commitment_trees.orchard); - if let Some(subtree) = trees.orchard_subtree { + if let Some(subtree) = note_commitment_trees.orchard_subtree { self.insert_orchard_subtree(zebra_db, &subtree); } } - self.update_history_tree(zebra_db, &finalized.treestate.history_tree); + self.update_history_tree(zebra_db, history_tree); Ok(()) } diff --git a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs index a9caed75c46..e9b41639f18 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs @@ -25,6 +25,7 @@ use zebra_chain::{ }; use crate::{ + request::FinalizedBlock, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, disk_format::{ @@ -36,7 +37,7 @@ use crate::{ }, zebra_db::ZebraDb, }, - BoxError, SemanticallyVerifiedBlock, + BoxError, }; impl ZebraDb { @@ -347,13 +348,13 @@ impl DiskWriteBatch { &mut self, db: &DiskDb, network: Network, - finalized: &SemanticallyVerifiedBlock, + finalized: &FinalizedBlock, new_outputs_by_out_loc: &BTreeMap, spent_utxos_by_outpoint: &HashMap, spent_utxos_by_out_loc: &BTreeMap, mut address_balances: HashMap, ) -> Result<(), BoxError> { - let SemanticallyVerifiedBlock { block, height, .. } = finalized; + let FinalizedBlock { block, height, .. } = finalized; // Update created and spent transparent outputs self.prepare_new_transparent_outputs_batch( diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 6cd966fd5a4..2fe3a06a1a4 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-test" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" authors = ["Zcash Foundation "] description = "Test harnesses and test vectors for Zebra" license = "MIT OR Apache-2.0" diff --git a/zebra-test/src/lib.rs b/zebra-test/src/lib.rs index f0d0090bace..196b345d887 100644 --- a/zebra-test/src/lib.rs +++ b/zebra-test/src/lib.rs @@ -1,7 +1,7 @@ //! Miscellaneous test code for Zebra. 
#![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_test")] +#![doc(html_root_url = "https://docs.rs/zebra_test")] // Each lazy_static variable uses additional recursion #![recursion_limit = "512"] diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index ba116aacdaf..6ff4c0c82a5 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-utils" -version = "1.0.0-beta.30" +version = "1.0.0-beta.31" authors = ["Zcash Foundation "] description = "Developer tools for Zebra maintenance and testing" license = "MIT OR Apache-2.0" @@ -74,11 +74,11 @@ tracing-error = "0.2.0" tracing-subscriber = "0.3.17" thiserror = "1.0.48" -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.30" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.31" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.31" } # These crates are needed for the block-template-to-proposal binary -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.30", optional = true } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.31", optional = true } # These crates are needed for the zebra-checkpoints binary itertools = { version = "0.11.0", optional = true } diff --git a/zebra-utils/README.md b/zebra-utils/README.md index 77b5c480202..73e89c25a48 100644 --- a/zebra-utils/README.md +++ b/zebra-utils/README.md @@ -39,10 +39,10 @@ To create checkpoints, you need a synchronized instance of `zebrad` or `zcashd`. #### Checkpoint Generation Setup -Make sure your `zebrad` or `zcashd` is [listening for RPC requests](https://doc.zebra.zfnd.org/zebra_rpc/config/struct.Config.html#structfield.listen_addr), +Make sure your `zebrad` or `zcashd` is [listening for RPC requests](https://doc-internal.zebra.zfnd.org/zebra_rpc/config/struct.Config.html#structfield.listen_addr), and synced to the network tip. -If you are on a Debian system, `zcash-cli` [can be installed as a package](https://zcash.readthedocs.io/en/latest/rtd_pages/install_debian_bin_packages.html). +If you are on a Debian system, `zcash-cli` [can be installed as a package](https://zcash.readthedocs.io/en/master/rtd_pages/install_debian_bin_packages.html). `zebra-checkpoints` is a standalone rust binary, you can compile it using: @@ -81,7 +81,7 @@ For more details about checkpoint lists, see the [`zebra-checkpoints` README.](h To update the testnet checkpoints, `zebra-checkpoints` needs to connect to a testnet node. To launch a testnet node, you can either: -- start `zebrad` [with a `zebrad.toml` with `network.network` set to `Testnet`](https://doc.zebra.zfnd.org/zebra_network/struct.Config.html#structfield.network), or +- start `zebrad` [with a `zebrad.toml` with `network.network` set to `Testnet`](https://docs.rs/zebra-network/latest/zebra_network/struct.Config.html#structfield.network), or - run `zcashd -testnet`. Then use the commands above to regenerate the checkpoints. diff --git a/zebra-utils/src/lib.rs b/zebra-utils/src/lib.rs index 207e094939a..ea987abf6bd 100644 --- a/zebra-utils/src/lib.rs +++ b/zebra-utils/src/lib.rs @@ -1,7 +1,7 @@ //! Utilities for Zebra development, not for library or application users. 
#![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_utils")] +#![doc(html_root_url = "https://docs.rs/zebra_utils")] use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 871d4536653..ae2bc074a9c 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -1,7 +1,7 @@ [package] # Crate metadata name = "zebrad" -version = "1.3.0" +version = "1.4.0" authors = ["Zcash Foundation "] description = "The Zcash Foundation's independent, consensus-compatible implementation of a Zcash node" license = "MIT OR Apache-2.0" @@ -145,15 +145,15 @@ test_sync_past_mandatory_checkpoint_mainnet = [] test_sync_past_mandatory_checkpoint_testnet = [] [dependencies] -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30" } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.30" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.30" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.30" } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.30" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.30" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.31" } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.31" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.31" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.31" } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.31" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.31" } # Required for crates.io publishing, but it's only used in tests -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.30", optional = true } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.31", optional = true } abscissa_core = "0.7.0" clap = { version = "4.4.7", features = ["cargo"] } @@ -168,6 +168,7 @@ toml = "0.8.3" futures = "0.3.29" rayon = "1.7.0" tokio = { version = "1.33.0", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } +tokio-stream = { version = "0.1.14", features = ["time"] } tower = { version = "0.4.13", features = ["hedge", "limit"] } pin-project = "1.1.3" @@ -226,7 +227,7 @@ proptest-derive = { version = "0.4.0", optional = true } console-subscriber = { version = "0.2.0", optional = true } [build-dependencies] -vergen = { version = "8.2.5", default-features = false, features = ["cargo", "git", "git2", "rustc"] } +vergen = { version = "8.2.6", default-features = false, features = ["cargo", "git", "git2", "rustc"] } # test feature lightwalletd-grpc-tests tonic-build = { version = "0.10.2", optional = true } diff --git a/zebrad/src/commands/generate.rs b/zebrad/src/commands/generate.rs index de9a3019c53..8d0ddf52e42 100644 --- a/zebrad/src/commands/generate.rs +++ b/zebrad/src/commands/generate.rs @@ -37,7 +37,7 @@ impl Runnable for GenerateCmd { # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/src/components/inbound.rs b/zebrad/src/components/inbound.rs index e93aa8517f0..f25b461f812 100644 --- 
a/zebrad/src/components/inbound.rs +++ b/zebrad/src/components/inbound.rs @@ -9,7 +9,7 @@ use std::{ collections::HashSet, future::Future, pin::Pin, - sync::Arc, + sync::{Arc, TryLockError}, task::{Context, Poll}, time::Duration, }; @@ -278,7 +278,11 @@ impl Service for Inbound { } } Err(TryRecvError::Empty) => { - // There's no setup data yet, so keep waiting for it + // There's no setup data yet, so keep waiting for it. + // + // We could use Future::poll() to get a waker and return Poll::Pending here. + // But we want to drop excess requests during startup instead. Otherwise, + // the inbound service gets overloaded, and starts disconnecting peers. result = Ok(()); Setup::Pending { full_verify_concurrency_limit, @@ -307,6 +311,11 @@ impl Service for Inbound { mempool, state, } => { + // # Correctness + // + // Clear the stream but ignore the final Pending return value. + // If we returned Pending here, and there were no waiting block downloads, + // then inbound requests would wait for the next block download, and hang forever. while let Poll::Ready(Some(_)) = block_downloads.as_mut().poll_next(cx) {} result = Ok(()); @@ -366,20 +375,35 @@ impl Service for Inbound { // // # Correctness // - // Briefly hold the address book threaded mutex while - // cloning the address book. Then sanitize in the future, - // after releasing the lock. - let peers = address_book.lock().unwrap().clone(); + // If the address book is busy, try again inside the future. If it can't be locked + // twice, ignore the request. + let address_book = address_book.clone(); + + let get_peers = move || match address_book.try_lock() { + Ok(address_book) => Some(address_book.clone()), + Err(TryLockError::WouldBlock) => None, + Err(TryLockError::Poisoned(_)) => panic!("previous thread panicked while holding the address book lock"), + }; + + let peers = get_peers(); async move { - // Correctness: get the current time after acquiring the address book lock. + // Correctness: get the current time inside the future. // - // This time is used to filter outdated peers, so it doesn't really matter + // This time is used to filter outdated peers, so it doesn't matter much // if we get it when the future is created, or when it starts running. let now = Utc::now(); + // If we didn't get the peers when the future was created, wait for other tasks + // to run, then try again when the future first runs. + if peers.is_none() { + tokio::task::yield_now().await; + } + let peers = peers.or_else(get_peers); + let is_busy = peers.is_none(); + // Send a sanitized response - let mut peers = peers.sanitized(now); + let mut peers = peers.map_or_else(Vec::new, |peers| peers.sanitized(now)); // Truncate the list let address_limit = div_ceil(peers.len(), ADDR_RESPONSE_LIMIT_DENOMINATOR); @@ -387,8 +411,20 @@ impl Service for Inbound { peers.truncate(address_limit); if peers.is_empty() { - // We don't know if the peer response will be empty until we've sanitized them. - debug!("ignoring `Peers` request from remote peer because our address book is empty"); + // Sometimes we don't know if the peer response will be empty until we've + // sanitized them. 
+ if is_busy { + info!( + "ignoring `Peers` request from remote peer because our address \ + book is busy" + ); + } else { + debug!( + "ignoring `Peers` request from remote peer because our address \ + book has no available peers" + ); + } + Ok(zn::Response::Nil) } else { Ok(zn::Response::Peers(peers)) diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs index aef623e45fa..0e9aa2d382d 100644 --- a/zebrad/src/components/mempool.rs +++ b/zebrad/src/components/mempool.rs @@ -22,12 +22,13 @@ use std::{ collections::HashSet, future::Future, iter, - pin::Pin, + pin::{pin, Pin}, task::{Context, Poll}, }; use futures::{future::FutureExt, stream::Stream}; use tokio::sync::broadcast; +use tokio_stream::StreamExt; use tower::{buffer::Buffer, timeout::Timeout, util::BoxService, Service}; use zebra_chain::{ @@ -42,7 +43,7 @@ use zebra_node_services::mempool::{Gossip, Request, Response}; use zebra_state as zs; use zebra_state::{ChainTipChange, TipAction}; -use crate::components::sync::SyncStatus; +use crate::components::{mempool::crawler::RATE_LIMIT_DELAY, sync::SyncStatus}; pub mod config; mod crawler; @@ -580,9 +581,11 @@ impl Service for Mempool { let best_tip_height = self.latest_chain_tip.best_tip_height(); // Clean up completed download tasks and add to mempool if successful. - while let Poll::Ready(Some(r)) = tx_downloads.as_mut().poll_next(cx) { + while let Poll::Ready(Some(r)) = + pin!(tx_downloads.timeout(RATE_LIMIT_DELAY)).poll_next(cx) + { match r { - Ok((tx, expected_tip_height)) => { + Ok(Ok((tx, expected_tip_height))) => { // # Correctness: // // It's okay to use tip height here instead of the tip hash since @@ -609,12 +612,20 @@ impl Service for Mempool { tx_downloads.download_if_needed_and_verify(tx.transaction.into()); } } - Err((txid, error)) => { + Ok(Err((txid, error))) => { tracing::debug!(?txid, ?error, "mempool transaction failed to verify"); metrics::counter!("mempool.failed.verify.tasks.total", 1, "reason" => error.to_string()); storage.reject_if_needed(txid, error); } + Err(_elapsed) => { + // A timeout happens when the stream hangs waiting for another service, + // so there is no specific transaction ID. + + tracing::info!("mempool transaction failed to verify due to timeout"); + + metrics::counter!("mempool.failed.verify.tasks.total", 1, "reason" => "timeout"); + } }; } diff --git a/zebrad/src/components/mempool/crawler.rs b/zebrad/src/components/mempool/crawler.rs index 610691f79ee..6d1129e2b30 100644 --- a/zebrad/src/components/mempool/crawler.rs +++ b/zebrad/src/components/mempool/crawler.rs @@ -50,7 +50,11 @@ use std::{collections::HashSet, time::Duration}; use futures::{future, pin_mut, stream::FuturesUnordered, StreamExt}; -use tokio::{sync::watch, task::JoinHandle, time::sleep}; +use tokio::{ + sync::watch, + task::JoinHandle, + time::{sleep, timeout}, +}; use tower::{timeout::Timeout, BoxError, Service, ServiceExt}; use tracing_futures::Instrument; @@ -77,7 +81,7 @@ const FANOUT: usize = 3; /// /// Using a prime number makes sure that mempool crawler fanouts /// don't synchronise with other crawls. -const RATE_LIMIT_DELAY: Duration = Duration::from_secs(73); +pub const RATE_LIMIT_DELAY: Duration = Duration::from_secs(73); /// The time to wait for a peer response. /// @@ -191,7 +195,14 @@ where loop { self.wait_until_enabled().await?; - self.crawl_transactions().await?; + // Avoid hangs when the peer service is not ready, or due to bugs in async code. 
+ timeout(RATE_LIMIT_DELAY, self.crawl_transactions()) + .await + .unwrap_or_else(|timeout| { + // Temporary errors just get logged and ignored. + info!("mempool crawl timed out: {timeout:?}"); + Ok(()) + })?; sleep(RATE_LIMIT_DELAY).await; } } diff --git a/zebrad/src/components/sync.rs b/zebrad/src/components/sync.rs index 9dc6da28589..db98a1de8e3 100644 --- a/zebrad/src/components/sync.rs +++ b/zebrad/src/components/sync.rs @@ -2,13 +2,17 @@ //! //! It is used when Zebra is a long way behind the current chain tip. -use std::{cmp::max, collections::HashSet, pin::Pin, task::Poll, time::Duration}; +use std::{cmp::max, collections::HashSet, convert, pin::Pin, task::Poll, time::Duration}; use color_eyre::eyre::{eyre, Report}; use futures::stream::{FuturesUnordered, StreamExt}; use indexmap::IndexSet; use serde::{Deserialize, Serialize}; -use tokio::{sync::watch, task::JoinError, time::sleep}; +use tokio::{ + sync::watch, + task::JoinError, + time::{sleep, timeout}, +}; use tower::{ builder::ServiceBuilder, hedge::Hedge, limit::ConcurrencyLimit, retry::Retry, timeout::Timeout, Service, ServiceExt, @@ -210,7 +214,10 @@ const SYNC_RESTART_DELAY: Duration = Duration::from_secs(67); /// If this timeout is removed (or set too low), Zebra will immediately retry /// to download and verify the genesis block from its peers. This can cause /// a denial of service on those peers. -const GENESIS_TIMEOUT_RETRY: Duration = Duration::from_secs(5); +/// +/// If this timeout is too short, old or buggy nodes will keep making useless +/// network requests. If there are a lot of them, it could overwhelm the network. +const GENESIS_TIMEOUT_RETRY: Duration = Duration::from_secs(10); /// Sync configuration section. #[derive(Clone, Debug, Deserialize, Serialize)] @@ -541,7 +548,8 @@ where /// following a fork. Either way, Zebra should attempt to obtain some more tips. /// /// Returns `Err` if there was an unrecoverable error and restarting the synchronization is - /// necessary. + /// necessary. This includes outer timeouts, where an entire syncing step takes an extremely + /// long time. (These usually indicate hangs.) #[instrument(skip(self))] async fn try_to_sync(&mut self) -> Result<(), Report> { self.prospective_tips = HashSet::new(); @@ -550,76 +558,106 @@ where state_tip = ?self.latest_chain_tip.best_tip_height(), "starting sync, obtaining new tips" ); - let mut extra_hashes = self.obtain_tips().await.map_err(|e| { - info!("temporary error obtaining tips: {:#}", e); - e - })?; + let mut extra_hashes = timeout(SYNC_RESTART_DELAY, self.obtain_tips()) + .await + .map_err(Into::into) + // TODO: replace with flatten() when it stabilises (#70142) + .and_then(convert::identity) + .map_err(|e| { + info!("temporary error obtaining tips: {:#}", e); + e + })?; self.update_metrics(); while !self.prospective_tips.is_empty() || !extra_hashes.is_empty() { - // Check whether any block tasks are currently ready: - while let Poll::Ready(Some(rsp)) = futures::poll!(self.downloads.next()) { - self.handle_block_response(rsp)?; - } - self.update_metrics(); + // Avoid hangs due to service readiness or other internal operations + extra_hashes = timeout(BLOCK_VERIFY_TIMEOUT, self.try_to_sync_once(extra_hashes)) + .await + .map_err(Into::into) + // TODO: replace with flatten() when it stabilises (#70142) + .and_then(convert::identity)?; + } - // Pause new downloads while the syncer or downloader are past their lookahead limits. 
- // - // To avoid a deadlock or long waits for blocks to expire, we ignore the download - // lookahead limit when there are only a small number of blocks waiting. - while self.downloads.in_flight() >= self.lookahead_limit(extra_hashes.len()) - || (self.downloads.in_flight() >= self.lookahead_limit(extra_hashes.len()) / 2 - && self.past_lookahead_limit_receiver.cloned_watch_data()) - { - trace!( - tips.len = self.prospective_tips.len(), - in_flight = self.downloads.in_flight(), - extra_hashes = extra_hashes.len(), - lookahead_limit = self.lookahead_limit(extra_hashes.len()), - state_tip = ?self.latest_chain_tip.best_tip_height(), - "waiting for pending blocks", - ); + info!("exhausted prospective tip set"); - let response = self.downloads.next().await.expect("downloads is nonempty"); + Ok(()) + } - self.handle_block_response(response)?; - self.update_metrics(); - } + /// Tries to synchronize the chain once, using the existing `extra_hashes`. + /// + /// Tries to extend the existing tips and download the missing blocks. + /// + /// Returns `Ok(extra_hashes)` if it was able to extend once and synchronize some of the chain. + /// Returns `Err` if there was an unrecoverable error and restarting the synchronization is + /// necessary. + #[instrument(skip(self))] + async fn try_to_sync_once( + &mut self, + mut extra_hashes: IndexSet, + ) -> Result, Report> { + // Check whether any block tasks are currently ready. + while let Poll::Ready(Some(rsp)) = futures::poll!(self.downloads.next()) { + // Some temporary errors are ignored, and syncing continues with other blocks. + // If it turns out they were actually important, syncing will run out of blocks, and + // the syncer will reset itself. + self.handle_block_response(rsp)?; + } + self.update_metrics(); - // Once we're below the lookahead limit, we can request more blocks or hashes. - if !extra_hashes.is_empty() { - debug!( - tips.len = self.prospective_tips.len(), - in_flight = self.downloads.in_flight(), - extra_hashes = extra_hashes.len(), - lookahead_limit = self.lookahead_limit(extra_hashes.len()), - state_tip = ?self.latest_chain_tip.best_tip_height(), - "requesting more blocks", - ); + // Pause new downloads while the syncer or downloader are past their lookahead limits. + // + // To avoid a deadlock or long waits for blocks to expire, we ignore the download + // lookahead limit when there are only a small number of blocks waiting. 
+ while self.downloads.in_flight() >= self.lookahead_limit(extra_hashes.len()) + || (self.downloads.in_flight() >= self.lookahead_limit(extra_hashes.len()) / 2 + && self.past_lookahead_limit_receiver.cloned_watch_data()) + { + trace!( + tips.len = self.prospective_tips.len(), + in_flight = self.downloads.in_flight(), + extra_hashes = extra_hashes.len(), + lookahead_limit = self.lookahead_limit(extra_hashes.len()), + state_tip = ?self.latest_chain_tip.best_tip_height(), + "waiting for pending blocks", + ); - let response = self.request_blocks(extra_hashes).await; - extra_hashes = Self::handle_hash_response(response)?; - } else { - info!( - tips.len = self.prospective_tips.len(), - in_flight = self.downloads.in_flight(), - extra_hashes = extra_hashes.len(), - lookahead_limit = self.lookahead_limit(extra_hashes.len()), - state_tip = ?self.latest_chain_tip.best_tip_height(), - "extending tips", - ); + let response = self.downloads.next().await.expect("downloads is nonempty"); - extra_hashes = self.extend_tips().await.map_err(|e| { - info!("temporary error extending tips: {:#}", e); - e - })?; - } + self.handle_block_response(response)?; self.update_metrics(); } - info!("exhausted prospective tip set"); + // Once we're below the lookahead limit, we can request more blocks or hashes. + if !extra_hashes.is_empty() { + debug!( + tips.len = self.prospective_tips.len(), + in_flight = self.downloads.in_flight(), + extra_hashes = extra_hashes.len(), + lookahead_limit = self.lookahead_limit(extra_hashes.len()), + state_tip = ?self.latest_chain_tip.best_tip_height(), + "requesting more blocks", + ); - Ok(()) + let response = self.request_blocks(extra_hashes).await; + extra_hashes = Self::handle_hash_response(response)?; + } else { + info!( + tips.len = self.prospective_tips.len(), + in_flight = self.downloads.in_flight(), + extra_hashes = extra_hashes.len(), + lookahead_limit = self.lookahead_limit(extra_hashes.len()), + state_tip = ?self.latest_chain_tip.best_tip_height(), + "extending tips", + ); + + extra_hashes = self.extend_tips().await.map_err(|e| { + info!("temporary error extending tips: {:#}", e); + e + })?; + } + self.update_metrics(); + + Ok(extra_hashes) } /// Given a block_locator list fan out request for subsequent hashes to @@ -932,16 +970,19 @@ where while !self.state_contains(self.genesis_hash).await? { info!("starting genesis block download and verify"); - let response = self.downloads.download_and_verify(self.genesis_hash).await; - Self::handle_response(response).map_err(|e| eyre!(e))?; - - let response = self.downloads.next().await.expect("downloads is nonempty"); + let response = timeout(SYNC_RESTART_DELAY, self.request_genesis_once()) + .await + .map_err(Into::into); + // 3 layers of results is not ideal, but we need the timeout on the outside. match response { - Ok(response) => self + Ok(Ok(Ok(response))) => self .handle_block_response(Ok(response)) .expect("never returns Err for Ok"), - Err(error) => { + // Handle fatal errors + Ok(Err(fatal_error)) => Err(fatal_error)?, + // Handle timeouts and block errors + Err(error) | Ok(Ok(Err(error))) => { // TODO: exit syncer on permanent service errors (NetworkError, VerifierError) if Self::should_restart_sync(&error) { warn!( @@ -963,6 +1004,20 @@ where Ok(()) } + /// Try to download and verify the genesis block once. + /// + /// Fatal errors are returned in the outer result, temporary errors in the inner one. 
+ async fn request_genesis_once( + &mut self, + ) -> Result, Report> { + let response = self.downloads.download_and_verify(self.genesis_hash).await; + Self::handle_response(response).map_err(|e| eyre!(e))?; + + let response = self.downloads.next().await.expect("downloads is nonempty"); + + Ok(response) + } + /// Queue download and verify tasks for each block that isn't currently known to our node. /// /// TODO: turn obtain and extend tips into a separate task, which sends hashes via a channel? diff --git a/zebrad/src/components/sync/downloads.rs b/zebrad/src/components/sync/downloads.rs index f43d8005280..c3322d0890c 100644 --- a/zebrad/src/components/sync/downloads.rs +++ b/zebrad/src/components/sync/downloads.rs @@ -4,7 +4,7 @@ use std::{ collections::HashMap, convert::{self, TryFrom}, pin::Pin, - sync::{Arc, TryLockError}, + sync::Arc, task::{Context, Poll}, }; @@ -154,6 +154,17 @@ pub enum BlockDownloadVerifyError { height: block::Height, hash: block::Hash, }, + + #[error( + "timeout during service readiness, download, verification, or internal downloader operation" + )] + Timeout, +} + +impl From for BlockDownloadVerifyError { + fn from(_value: tokio::time::error::Elapsed) -> Self { + BlockDownloadVerifyError::Timeout + } } /// Represents a [`Stream`] of download and verification tasks during chain sync. @@ -471,17 +482,12 @@ where metrics::counter!("sync.max.height.limit.paused.count", 1); } else if block_height <= lookahead_reset_height && past_lookahead_limit_receiver.cloned_watch_data() { - // Try to reset the watched value to false, since we're well under the limit. - match past_lookahead_limit_sender.try_lock() { - Ok(watch_sender_guard) => { - // If Zebra is shutting down, ignore the send error. - let _ = watch_sender_guard.send(true); - metrics::counter!("sync.max.height.limit.reset.count", 1); - }, - Err(TryLockError::Poisoned(_)) => panic!("thread panicked while holding the past_lookahead_limit_sender mutex guard"), - // We'll try allowing new downloads when we get the next block - Err(TryLockError::WouldBlock) => {} - } + // Reset the watched value to false, since we're well under the limit. + // We need to block here, because if we don't the syncer can hang. + + // But if Zebra is shutting down, ignore the send error. + let _ = past_lookahead_limit_sender.lock().expect("thread panicked while holding the past_lookahead_limit_sender mutex guard").send(false); + metrics::counter!("sync.max.height.limit.reset.count", 1); metrics::counter!("sync.max.height.limit.reset.attempt.count", 1); } @@ -571,14 +577,26 @@ where pub fn cancel_all(&mut self) { // Replace the pending task list with an empty one and drop it. let _ = std::mem::take(&mut self.pending); + // Signal cancellation to all running tasks. // Since we already dropped the JoinHandles above, they should // fail silently. for (_hash, cancel) in self.cancel_handles.drain() { let _ = cancel.send(()); } + assert!(self.pending.is_empty()); assert!(self.cancel_handles.is_empty()); + + // Set the lookahead limit to false, since we're empty (so we're under the limit). + // + // It is ok to block here, because we're doing a reset and sleep anyway. + // But if Zebra is shutting down, ignore the send error. + let _ = self + .past_lookahead_limit_sender + .lock() + .expect("thread panicked while holding the past_lookahead_limit_sender mutex guard") + .send(false); } /// Get the number of currently in-flight download and verify tasks. 
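The syncer changes above repeatedly wrap a fallible async step in `tokio::time::timeout` and then flatten the nested result with `.map_err(Into::into).and_then(convert::identity)`. A minimal standalone sketch of that pattern (not part of this changeset; `fallible_step`, the returned value, and the 67-second deadline are illustrative stand-ins for `obtain_tips`, `try_to_sync_once`, or `request_genesis_once`):

use std::convert;
use color_eyre::eyre::Report;
use tokio::time::{timeout, Duration};

// Stand-in for an async step that can fail on its own, independent of the deadline.
async fn fallible_step() -> Result<u32, Report> {
    Ok(42)
}

async fn step_with_deadline() -> Result<u32, Report> {
    // `timeout` yields Result<Result<u32, Report>, Elapsed>.
    timeout(Duration::from_secs(67), fallible_step())
        .await
        // Elapsed -> Report, so both error types line up.
        .map_err(Into::into)
        // Flatten Result<Result<u32, Report>, Report> into Result<u32, Report>.
        // `Result::flatten` is still unstable, hence `convert::identity`, as in the diff.
        .and_then(convert::identity)
}

#[tokio::main]
async fn main() -> Result<(), Report> {
    println!("step returned {}", step_with_deadline().await?);
    Ok(())
}

In the diff, either failure (a timeout or a step error) propagates as a `Report`, which makes the caller restart synchronization instead of hanging.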
diff --git a/zebrad/src/components/sync/end_of_support.rs b/zebrad/src/components/sync/end_of_support.rs index f7ca757d2b3..0b615c1f050 100644 --- a/zebrad/src/components/sync/end_of_support.rs +++ b/zebrad/src/components/sync/end_of_support.rs @@ -13,7 +13,7 @@ use zebra_chain::{ use crate::application::release_version; /// The estimated height that this release will be published. -pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_264_000; +pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_290_000; /// The maximum number of days after `ESTIMATED_RELEASE_HEIGHT` where a Zebra server will run /// without halting. diff --git a/zebrad/src/lib.rs b/zebrad/src/lib.rs index b36ca2e182a..0395d47c24e 100644 --- a/zebrad/src/lib.rs +++ b/zebrad/src/lib.rs @@ -6,7 +6,10 @@ //! which implement the different components of a Zcash node //! (networking, chain structures, validation, rpc, etc). //! -//! [Rendered docs from the `main` branch](https://doc.zebra.zfnd.org). +//! [Rendered docs for the latest crate releases](https://docs.rs/releases/search?query=zebra). +//! +//! [Rendered docs from the `main` branch](https://doc-internal.zebra.zfnd.org). +//! //! [Join us on the Zcash Foundation Engineering Discord](https://discord.gg/na6QZNd). //! //! ## About Zcash @@ -102,7 +105,7 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebrad")] +#![doc(html_root_url = "https://docs.rs/zebrad")] // Tracing causes false positives on this lint: // https://github.com/tokio-rs/tracing/issues/553 #![allow(clippy::cognitive_complexity)] diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.1.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.1.toml index b64dc7ed15e..1472bd432cd 100644 --- a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.1.toml +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.1.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.2.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.2.toml index ba578bd3f00..3397a033e6b 100644 --- a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.2.toml +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.2.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.3.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.3.toml index 379c491bf6a..e5ce9a5cfb3 100644 --- a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.3.toml +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.3.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts 
to load configs in the following order: # diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.4.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.4.toml index 85406147154..f3f1d74bb79 100644 --- a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.4.toml +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.4.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.5.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.5.toml index 87ed3dad81e..a22a6b4e1f0 100644 --- a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.5.toml +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.5.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml index c6629087b72..ec7fe819bd0 100644 --- a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml index 3536c80c9c8..5375f74e7b1 100644 --- a/zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.0-beta.12.toml b/zebrad/tests/common/configs/v1.0.0-beta.12.toml index 32421cf0bd2..86d8718877c 100644 --- a/zebrad/tests/common/configs/v1.0.0-beta.12.toml +++ b/zebrad/tests/common/configs/v1.0.0-beta.12.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.0-beta.13.toml b/zebrad/tests/common/configs/v1.0.0-beta.13.toml index 14dc310ac59..9477f2f5deb 100644 --- a/zebrad/tests/common/configs/v1.0.0-beta.13.toml +++ b/zebrad/tests/common/configs/v1.0.0-beta.13.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# 
https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.0-beta.15.toml b/zebrad/tests/common/configs/v1.0.0-beta.15.toml index c38cc2530e9..5a10b5bdcbf 100644 --- a/zebrad/tests/common/configs/v1.0.0-beta.15.toml +++ b/zebrad/tests/common/configs/v1.0.0-beta.15.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.0-rc.0.toml b/zebrad/tests/common/configs/v1.0.0-rc.0.toml index 72f8c6bc693..7363d588b70 100644 --- a/zebrad/tests/common/configs/v1.0.0-rc.0.toml +++ b/zebrad/tests/common/configs/v1.0.0-rc.0.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.0-rc.2.toml b/zebrad/tests/common/configs/v1.0.0-rc.2.toml index 8622db79f88..3ebf86709f3 100644 --- a/zebrad/tests/common/configs/v1.0.0-rc.2.toml +++ b/zebrad/tests/common/configs/v1.0.0-rc.2.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.0-rc.4.toml b/zebrad/tests/common/configs/v1.0.0-rc.4.toml index de63dd90d02..5dfc7d14391 100644 --- a/zebrad/tests/common/configs/v1.0.0-rc.4.toml +++ b/zebrad/tests/common/configs/v1.0.0-rc.4.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.0-rc.9.toml b/zebrad/tests/common/configs/v1.0.0-rc.9.toml index 52cd503be0b..5a90632ea92 100644 --- a/zebrad/tests/common/configs/v1.0.0-rc.9.toml +++ b/zebrad/tests/common/configs/v1.0.0-rc.9.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.1.toml b/zebrad/tests/common/configs/v1.0.1.toml index 02bac53da62..7c94c32c9b2 100644 --- a/zebrad/tests/common/configs/v1.0.1.toml +++ b/zebrad/tests/common/configs/v1.0.1.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git 
a/zebrad/tests/common/configs/v1.3.0-progress-bars.toml b/zebrad/tests/common/configs/v1.3.0-progress-bars.toml index 82da5039fd8..a5801ba802b 100644 --- a/zebrad/tests/common/configs/v1.3.0-progress-bars.toml +++ b/zebrad/tests/common/configs/v1.3.0-progress-bars.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.4.0.toml b/zebrad/tests/common/configs/v1.4.0.toml index 0879f87c44f..26ee79452b5 100644 --- a/zebrad/tests/common/configs/v1.4.0.toml +++ b/zebrad/tests/common/configs/v1.4.0.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: #
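The mempool changes earlier in this section guard the `tx_downloads` stream with a per-item timeout from `tokio_stream::StreamExt::timeout`, pinned with `pin!` because the adapter is not `Unpin`. A standalone sketch of that adapter, assuming a tokio runtime and the `time` feature of `tokio-stream` (the `demo_items` stream, its values, and the printed messages are illustrative, not Zebra code):

use std::{pin::pin, time::Duration};
use tokio_stream::StreamExt;

#[tokio::main]
async fn main() {
    // Stand-in for the `tx_downloads` stream of verification results.
    let demo_items = tokio_stream::iter(vec![1u32, 2, 3]);

    // The timeout adapter holds a tokio `Sleep`, so it is not `Unpin`; pin it on the
    // stack, mirroring `pin!(tx_downloads.timeout(RATE_LIMIT_DELAY))` in the diff.
    let mut with_deadline = pin!(demo_items.timeout(Duration::from_secs(73)));

    // Each item now arrives as Ok(item), or Err(Elapsed) when the next item takes too
    // long, so the consumer can log a timeout instead of hanging on a stalled stream.
    while let Some(result) = with_deadline.next().await {
        match result {
            Ok(item) => println!("verified item: {item}"),
            Err(_elapsed) => println!("timed out waiting for the next item"),
        }
    }
}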