diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml
index a439019776d..c4508e68f70 100644
--- a/.github/workflows/cd-deploy-nodes-gcp.yml
+++ b/.github/workflows/cd-deploy-nodes-gcp.yml
@@ -136,6 +136,8 @@ jobs:
       image_name: zebrad
       no_cache: ${{ inputs.no_cache || false }}
       rust_log: info
+    # This job needs access to Docker Hub secrets to run successfully
+    secrets: inherit

   # Test that Zebra works using the default config with the latest Zebra version.
   test-configuration-file:
diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml
index 2c8ce336706..997bf25eb00 100644
--- a/.github/workflows/ci-tests.yml
+++ b/.github/workflows/ci-tests.yml
@@ -130,6 +130,8 @@ jobs:
       rust_backtrace: full
       rust_lib_backtrace: full
       rust_log: info
+    # This job needs access to Docker Hub secrets to run successfully
+    secrets: inherit

   # Runs Zebra unit tests
   unit-tests:
diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml
index 0908a612892..7612777dfd6 100644
--- a/.github/workflows/sub-build-docker-image.yml
+++ b/.github/workflows/sub-build-docker-image.yml
@@ -68,7 +68,7 @@ jobs:
   build:
     name: Build images
     timeout-minutes: 210
-    runs-on: ubuntu-latest-xl
+    runs-on: ubuntu-latest
     outputs:
       image_digest: ${{ steps.docker_build.outputs.digest }}
       image_name: ${{ fromJSON(steps.docker_build.outputs.metadata)['image.name'] }}
@@ -92,6 +92,8 @@ jobs:
         uses: docker/metadata-action@v5.5.1
         with:
           # list of Docker images to use as base name for tags
+          # We only publish images to DockerHub if a release is not a pre-release
+          # Ref: https://github.com/orgs/community/discussions/26281#discussioncomment-3251177
           images: |
             us-docker.pkg.dev/${{ vars.GCP_PROJECT }}/zebra/${{ inputs.image_name }}
             zfnd/${{ inputs.image_name }},enable=${{ github.event_name == 'release' && !github.event.release.prerelease }}
@@ -118,11 +120,6 @@ jobs:
             type=edge,enable={{is_default_branch}}
             type=schedule

-      # Setup Docker Buildx to allow use of docker cache layers from GH
-      - name: Set up Docker Buildx
-        id: buildx
-        uses: docker/setup-buildx-action@v3
-
       - name: Authenticate to Google Cloud
         id: auth
         uses: google-github-actions/auth@v2.1.3
@@ -144,14 +141,20 @@ jobs:
           password: ${{ steps.auth.outputs.access_token }}

       - name: Login to DockerHub
-        # We only publish images to DockerHub if a release is not a pre-release
-        # Ref: https://github.com/orgs/community/discussions/26281#discussioncomment-3251177
-        if: ${{ github.event_name == 'release' && !github.event.release.prerelease }}
        uses: docker/login-action@v3.2.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

+      # Set up Docker Buildx to use Docker Build Cloud
+      - name: Set up Docker Buildx
+        id: buildx
+        uses: docker/setup-buildx-action@v3
+        with:
+          version: "lab:latest"
+          driver: cloud
+          endpoint: "zfnd/zebra"
+
       # Build and push image to Google Artifact Registry, and possibly DockerHub
       - name: Build & push
         id: docker_build
@@ -171,17 +174,3 @@ jobs:
           # Don't read from the cache if the caller disabled it.
           # https://docs.docker.com/engine/reference/commandline/buildx_build/#options
           no-cache: ${{ inputs.no_cache }}
-          # To improve build speeds, for each branch we push an additional image to the registry,
-          # to be used as the caching layer, using the `max` caching mode.
-          #
-          # We use multiple cache sources to confirm a cache hit, starting from a per-branch cache.
- # If there's no hit, we continue with a `main` branch cache, which helps us avoid - # rebuilding cargo-chef, most dependencies, and possibly some Zebra crates. - # - # The caches are tried in top-down order, the first available cache is used: - # https://github.com/moby/moby/pull/26839#issuecomment-277383550 - cache-from: | - type=registry,ref=us-docker.pkg.dev/${{ vars.GCP_PROJECT }}/zebra-caching/${{ inputs.image_name }}${{ inputs.tag_suffix }}:${{ env.GITHUB_REF_SLUG_URL }}-cache - type=registry,ref=us-docker.pkg.dev/${{ vars.GCP_PROJECT }}/zebra-caching/${{ inputs.image_name }}${{ inputs.tag_suffix }}:main-cache - cache-to: | - type=registry,ref=us-docker.pkg.dev/${{ vars.GCP_PROJECT }}/zebra-caching/${{ inputs.image_name }}${{ inputs.tag_suffix }}:${{ env.GITHUB_REF_SLUG_URL }}-cache,mode=max diff --git a/.github/workflows/sub-ci-integration-tests-gcp.yml b/.github/workflows/sub-ci-integration-tests-gcp.yml index 27f11f1f37d..7e8bd141d54 100644 --- a/.github/workflows/sub-ci-integration-tests-gcp.yml +++ b/.github/workflows/sub-ci-integration-tests-gcp.yml @@ -9,7 +9,7 @@ on: workflow_call: inputs: network: - default: 'Mainnet' + default: "Mainnet" type: string regenerate-disks: default: false @@ -59,7 +59,7 @@ jobs: if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} uses: ./.github/workflows/sub-find-cached-disks.yml with: - network: 'Testnet' + network: "Testnet" # zebrad cached checkpoint state tests @@ -72,14 +72,14 @@ jobs: # Note: the output from get-available-disks should match with the caller workflow inputs regenerate-stateful-disks: name: Zebra checkpoint - needs: [ get-available-disks ] + needs: [get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || github.event.inputs.regenerate-disks == 'true' }} with: app_name: zebrad test_id: sync-to-checkpoint test_description: Test sync up to mandatory checkpoint - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1 -e ZEBRA_FORCE_USE_COLOR=1' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1 -e ZEBRA_FORCE_USE_COLOR=1" needs_zebra_state: false saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} @@ -101,14 +101,14 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
test-stateful-sync: name: Zebra checkpoint update - needs: [ regenerate-stateful-disks, get-available-disks ] + needs: [regenerate-stateful-disks, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || needs.regenerate-stateful-disks.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad test_id: sync-past-checkpoint test_description: Test full validation sync from a cached state - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1" needs_zebra_state: true saves_to_disk: false disk_suffix: checkpoint @@ -129,7 +129,7 @@ jobs: # Note: the output from get-available-disks should match with the caller workflow inputs test-full-sync: name: Zebra tip - needs: [ get-available-disks ] + needs: [get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet') }} with: @@ -138,7 +138,7 @@ jobs: test_description: Test a full sync up to the tip # The value of FULL_SYNC_MAINNET_TIMEOUT_MINUTES is currently ignored. # TODO: update the test to use {{ input.network }} instead? - test_variables: '-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1' + test_variables: "-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1" # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: false @@ -172,21 +172,21 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. test-update-sync: name: Zebra tip update - needs: [ test-full-sync, get-available-disks ] + needs: [test-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad test_id: update-to-tip test_description: Test syncing to tip with a Zebra tip state - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" needs_zebra_state: true # update the disk on every PR, to increase CI speed saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" height_grep_text: 'current_height.*=.*Height.*\(' secrets: inherit @@ -205,7 +205,7 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
checkpoints-mainnet: name: Generate checkpoints mainnet - needs: [ test-full-sync, get-available-disks ] + needs: [test-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: @@ -213,13 +213,13 @@ jobs: test_id: checkpoints-mainnet test_description: Generate Zebra checkpoints on mainnet # TODO: update the test to use {{ input.network }} instead? - test_variables: '-e NETWORK=Mainnet -e GENERATE_CHECKPOINTS_MAINNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + test_variables: "-e NETWORK=Mainnet -e GENERATE_CHECKPOINTS_MAINNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" needs_zebra_state: true # test-update-sync updates the disk on every PR, so we don't need to do it here saves_to_disk: false disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" height_grep_text: 'current_height.*=.*Height.*\(' secrets: inherit @@ -240,7 +240,7 @@ jobs: # Note: the output from get-available-disks-testnet should match with the caller workflow inputs test-full-sync-testnet: name: Zebra tip on testnet - needs: [ get-available-disks-testnet ] + needs: [get-available-disks-testnet] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ (github.event_name == 'schedule' && vars.SCHEDULE_TESTNET_FULL_SYNC == 'true') || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Testnet') }} with: @@ -248,7 +248,7 @@ jobs: test_id: full-sync-testnet test_description: Test a full sync up to the tip on testnet # The value of FULL_SYNC_TESTNET_TIMEOUT_MINUTES is currently ignored. - test_variables: '-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1' + test_variables: "-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1" network: "Testnet" # A full testnet sync could take 2-10 hours in April 2023. # The time varies a lot due to the small number of nodes. @@ -286,14 +286,14 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
checkpoints-testnet: name: Generate checkpoints testnet - needs: [ test-full-sync-testnet, get-available-disks-testnet ] + needs: [test-full-sync-testnet, get-available-disks-testnet] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || needs.test-full-sync-testnet.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad test_id: checkpoints-testnet test_description: Generate Zebra checkpoints on testnet - test_variables: '-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + test_variables: "-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" network: "Testnet" needs_zebra_state: true # update the disk on every PR, to increase CI speed @@ -301,8 +301,8 @@ jobs: saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" height_grep_text: 'zebra_tip_height.*=.*Height.*\(' secrets: inherit @@ -319,7 +319,7 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. lightwalletd-full-sync: name: lightwalletd tip - needs: [ test-full-sync, get-available-disks ] + needs: [test-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml # Currently the lightwalletd tests only work on Mainnet if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && (github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }} @@ -327,7 +327,7 @@ jobs: app_name: lightwalletd test_id: lwd-full-sync test_description: Test lightwalletd full sync - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache" # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: true @@ -336,10 +336,10 @@ jobs: force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_prefix: lwd-cache disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' - lwd_state_dir: 'lwd-cache' - height_grep_text: 'Waiting for block: ' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" + lwd_state_dir: "lwd-cache" + height_grep_text: "Waiting for block: " secrets: inherit # We want to prevent multiple lightwalletd full syncs running at the same time, # but we don't want to cancel running syncs on `main` if a new PR gets merged, @@ -359,24 +359,24 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
lightwalletd-update-sync: name: lightwalletd tip update - needs: [ lightwalletd-full-sync, get-available-disks ] + needs: [lightwalletd-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: lwd-update-sync test_description: Test lightwalletd update sync with both states - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache" needs_zebra_state: true needs_lwd_state: true saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_prefix: lwd-cache disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' - lwd_state_dir: 'lwd-cache' - height_grep_text: 'Waiting for block: ' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" + lwd_state_dir: "lwd-cache" + height_grep_text: "Waiting for block: " secrets: inherit # Test that Zebra can answer a synthetic RPC call, using a cached Zebra tip state @@ -391,19 +391,19 @@ jobs: # TODO: move this job below the rest of the mainnet jobs that just use Zebra cached state lightwalletd-rpc-test: name: Zebra tip JSON-RPC - needs: [ test-full-sync, get-available-disks ] + needs: [test-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: fully-synced-rpc test_description: Test lightwalletd RPC with a Zebra tip state - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" needs_zebra_state: true saves_to_disk: false disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" secrets: inherit # Test that Zebra can handle a lightwalletd send transaction RPC call, using a cached Zebra tip state @@ -416,21 +416,21 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
lightwalletd-transactions-test: name: lightwalletd tip send - needs: [ lightwalletd-full-sync, get-available-disks ] + needs: [lightwalletd-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: lwd-send-transactions test_description: Test sending transactions via lightwalletd - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache" needs_zebra_state: true needs_lwd_state: true saves_to_disk: false disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' - lwd_state_dir: 'lwd-cache' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" + lwd_state_dir: "lwd-cache" secrets: inherit # Test that Zebra can handle gRPC wallet calls, using a cached Zebra tip state @@ -443,21 +443,21 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. lightwalletd-grpc-test: name: lightwalletd GRPC tests - needs: [ lightwalletd-full-sync, get-available-disks ] + needs: [lightwalletd-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: lwd-grpc-wallet test_description: Test gRPC calls via lightwalletd - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache" needs_zebra_state: true needs_lwd_state: true saves_to_disk: false disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' - lwd_state_dir: 'lwd-cache' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" + lwd_state_dir: "lwd-cache" secrets: inherit ## getblocktemplate-rpcs using cached Zebra state on mainnet @@ -474,20 +474,20 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
get-block-template-test: name: get block template - needs: [ test-full-sync, get-available-disks ] + needs: [test-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad test_id: get-block-template test_description: Test getblocktemplate RPC method via Zebra's rpc server - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" needs_zebra_state: true needs_lwd_state: false saves_to_disk: false disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" secrets: inherit # Test that Zebra can handle a submit block RPC call, using a cached Zebra tip state @@ -516,31 +516,32 @@ jobs: zebra_state_dir: "zebrad-cache" secrets: inherit - # Test that the scanner can continue scanning where it was left when zebrad restarts. - # - # Runs: - # - after every PR is merged to `main` - # - on every PR update - # - # If the state version has changed, waits for the new cached states to be created. - # Otherwise, if the state rebuild was skipped, runs immediately after the build job. - scan-start-where-left-test: - name: Scan starts where left - needs: [test-full-sync, get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml - if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - with: - app_name: zebrad - test_id: scan-start-where-left - test_description: Test that the scanner can continue scanning where it was left when zebrad restarts. - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCAN_START_WHERE_LEFT=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" - needs_zebra_state: true - needs_lwd_state: false - saves_to_disk: true - disk_suffix: tip - root_state_path: "/var/cache" - zebra_state_dir: "zebrad-cache" - secrets: inherit + # TODO: Move this test once we have the new scanner binary. + # # Test that the scanner can continue scanning where it was left when zebrad restarts. + # # + # # Runs: + # # - after every PR is merged to `main` + # # - on every PR update + # # + # # If the state version has changed, waits for the new cached states to be created. + # # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
+  # scan-start-where-left-test:
+  #   name: Scan starts where left
+  #   needs: [test-full-sync, get-available-disks]
+  #   uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
+  #   if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
+  #   with:
+  #     app_name: zebrad
+  #     test_id: scan-start-where-left
+  #     test_description: Test that the scanner can continue scanning where it was left when zebrad restarts.
+  #     test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCAN_START_WHERE_LEFT=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
+  #     needs_zebra_state: true
+  #     needs_lwd_state: false
+  #     saves_to_disk: true
+  #     disk_suffix: tip
+  #     root_state_path: "/var/cache"
+  #     zebra_state_dir: "zebrad-cache"
+  #     secrets: inherit

   # Test that the scan task registers keys, deletes keys, and subscribes to results for keys while running.
   #
@@ -589,7 +590,6 @@ jobs:
       get-block-template-test,
       submit-block-test,
-      scan-start-where-left-test,
-      scan-task-commands-test
+      scan-task-commands-test,
     ]
   # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges.
   # (PR statuses are already reported in the PR jobs list, and checked by Mergify.)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5486a3738a5..d920485715e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,8 +9,13 @@ and this project adheres to [Semantic Versioning](https://semver.org).

 ### Changed

-- We realized that a longer than `zcashd` end of support could be problematic in some cases so we reverted back from 20 to 16 weeks ([#8530](https://github.com/ZcashFoundation/zebra/pull/8530))
-
+- We realized that an end-of-support period longer than `zcashd`'s could be
+  problematic in some cases, so we reverted from 20 weeks back to 16
+  ([#8530](https://github.com/ZcashFoundation/zebra/pull/8530))
+- The `zebrad` binary no longer contains the shielded transaction scanner, so
+  `zebrad` no longer holds users' viewing keys.
+- We now gate the `tor`-dependent code behind feature flags only, instead of
+  general conditional compilation attributes.
 - Fixed a bug with trailing characters in the openapi spec method descriptions ([#8597](https://github.com/ZcashFoundation/zebra/pull/8597))

 ## [Zebra 1.7.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.7.0) - 2024-05-07
diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml
index bfc8a4a15dc..d62a31773cc 100644
--- a/tower-batch-control/Cargo.toml
+++ b/tower-batch-control/Cargo.toml
@@ -47,3 +47,6 @@ tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.13" }
 tower-test = "0.4.0"
 zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.37" }
+
+[lints.rust]
+unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
diff --git a/zebra-network/src/isolated.rs b/zebra-network/src/isolated.rs
index 62b3eee7a93..c6ca66d53b2 100644
--- a/zebra-network/src/isolated.rs
+++ b/zebra-network/src/isolated.rs
@@ -16,8 +16,7 @@ use crate::{

 // Wait until `arti-client`'s dependency `x25519-dalek v1.2.0` is updated to a higher version. (#5492)
 // #[cfg(feature = "tor")]
-#[cfg(tor)]
-pub(crate) mod tor;
+// pub(crate) mod tor;

 #[cfg(test)]
 mod tests;
diff --git a/zebra-network/src/lib.rs b/zebra-network/src/lib.rs
index 254f85cde87..9b61224dc14 100644
--- a/zebra-network/src/lib.rs
+++ b/zebra-network/src/lib.rs
@@ -165,13 +165,11 @@ mod protocol;

 // Wait until `arti-client`'s dependency `x25519-dalek v1.2.0` is updated to a higher version. (#5492)
 // #[cfg(feature = "tor")]
-#[cfg(tor)]
-pub use crate::isolated::tor::connect_isolated_tor;
+// pub use crate::isolated::tor::connect_isolated_tor;

 // Wait until `arti-client`'s dependency `x25519-dalek v1.2.0` is updated to a higher version. (#5492)
 // #[cfg(all(feature = "tor", any(test, feature = "proptest-impl")))]
-#[cfg(tor)]
-pub use crate::isolated::tor::connect_isolated_tor_with_inbound;
+// pub use crate::isolated::tor::connect_isolated_tor_with_inbound;

 #[cfg(any(test, feature = "proptest-impl"))]
 pub use crate::{
diff --git a/zebra-state/src/arbitrary.rs b/zebra-state/src/arbitrary.rs
index 9f87c749c98..c2176296ca7 100644
--- a/zebra-state/src/arbitrary.rs
+++ b/zebra-state/src/arbitrary.rs
@@ -50,27 +50,6 @@ where
     }
 }

-impl From<SemanticallyVerifiedBlock> for ChainTipBlock {
-    fn from(prepared: SemanticallyVerifiedBlock) -> Self {
-        let SemanticallyVerifiedBlock {
-            block,
-            hash,
-            height,
-            new_outputs: _,
-            transaction_hashes,
-        } = prepared;
-
-        Self {
-            hash,
-            height,
-            time: block.header.time,
-            transactions: block.transactions.clone(),
-            transaction_hashes,
-            previous_block_hash: block.header.previous_block_hash,
-        }
-    }
-}
-
 impl SemanticallyVerifiedBlock {
     /// Returns a [`ContextuallyVerifiedBlock`] created from this block,
     /// with fake zero-valued spent UTXOs.
diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs
index 59088a931bd..58a83c9e15d 100644
--- a/zebra-state/src/lib.rs
+++ b/zebra-state/src/lib.rs
@@ -46,8 +46,10 @@ pub use request::{
 };
 pub use response::{KnownBlock, MinedTx, ReadResponse, Response};
 pub use service::{
-    chain_tip::{ChainTipChange, LatestChainTip, TipAction},
-    check, init, spawn_init,
+    chain_tip::{ChainTipBlock, ChainTipChange, ChainTipSender, LatestChainTip, TipAction},
+    check, init, init_read_only,
+    non_finalized_state::NonFinalizedState,
+    spawn_init, spawn_init_read_only,
     watch_receiver::WatchReceiver,
     OutputIndex, OutputLocation, TransactionIndex, TransactionLocation,
 };
@@ -76,7 +78,6 @@ pub use response::GetBlockTemplateChainInfo;
 #[cfg(any(test, feature = "proptest-impl"))]
 pub use service::{
     arbitrary::{populated_state, CHAIN_TIP_UPDATE_WAIT_LIMIT},
-    chain_tip::{ChainTipBlock, ChainTipSender},
     finalized_state::{RawBytes, KV, MAX_ON_DISK_HEIGHT},
     init_test, init_test_services,
 };
@@ -96,4 +97,4 @@ pub(crate) use config::hidden::{
     write_database_format_version_to_disk, write_state_database_format_version_to_disk,
 };

-pub(crate) use request::ContextuallyVerifiedBlock;
+pub use request::ContextuallyVerifiedBlock;
diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs
index 994e8ad6b32..0fbe8d8eaad 100644
--- a/zebra-state/src/service.rs
+++ b/zebra-state/src/service.rs
@@ -24,15 +24,6 @@ use std::{
     time::{Duration, Instant},
 };

-#[cfg(feature = "elasticsearch")]
-use elasticsearch::{
-    auth::Credentials::Basic,
-    cert::CertificateValidation,
-    http::transport::{SingleNodeConnectionPool, TransportBuilder},
-    http::Url,
-    Elasticsearch,
-};
-
 use futures::future::FutureExt;
 use tokio::sync::{oneshot, watch};
 use tower::{util::BoxService, Service, ServiceExt};
@@ -319,29 +310,12 @@ impl StateService {
         checkpoint_verify_concurrency_limit: usize,
     ) -> (Self, ReadStateService, LatestChainTip, ChainTipChange) {
         let timer = CodeTimer::start();
-
-        #[cfg(feature = "elasticsearch")]
-        let finalized_state = {
-            let conn_pool = SingleNodeConnectionPool::new(
-                Url::parse(config.elasticsearch_url.as_str())
-                    .expect("configured elasticsearch url is invalid"),
-            );
-            let transport = TransportBuilder::new(conn_pool)
-                .cert_validation(CertificateValidation::None)
-                .auth(Basic(
-                    config.clone().elasticsearch_username,
-                    config.clone().elasticsearch_password,
-                ))
-                .build()
-                .expect("elasticsearch transport builder should not fail");
-            let elastic_db = Some(Elasticsearch::new(transport));
-
-            FinalizedState::new(&config, network, elastic_db)
-        };
-
-        #[cfg(not(feature = "elasticsearch"))]
-        let finalized_state = { FinalizedState::new(&config, network) };
-
+        let finalized_state = FinalizedState::new(
+            &config,
+            network,
+            #[cfg(feature = "elasticsearch")]
+            true,
+        );
         timer.finish(module_path!(), line!(), "opening finalized state database");

         let timer = CodeTimer::start();
@@ -387,7 +361,7 @@ impl StateService {

         let read_service = ReadStateService::new(
             &finalized_state,
-            block_write_task,
+            Some(block_write_task),
             non_finalized_state_receiver,
         );
@@ -828,14 +802,14 @@ impl ReadStateService {
     /// and a watch channel for updating the shared recent non-finalized chain.
     pub(crate) fn new(
         finalized_state: &FinalizedState,
-        block_write_task: Arc<std::thread::JoinHandle<()>>,
+        block_write_task: Option<Arc<std::thread::JoinHandle<()>>>,
         non_finalized_state_receiver: watch::Receiver<NonFinalizedState>,
     ) -> Self {
         let read_service = Self {
             network: finalized_state.network(),
             db: finalized_state.db.clone(),
             non_finalized_state_receiver: WatchReceiver::new(non_finalized_state_receiver),
-            block_write_task: Some(block_write_task),
+            block_write_task,
         };

         tracing::debug!("created new read-only state service");
@@ -1945,6 +1919,54 @@ pub fn init(
     )
 }

+/// Initialize a read state service from the provided [`Config`].
+/// Returns a read-only state service, a shared handle to the finalized state
+/// database, and a sender for the shared non-finalized state.
+///
+/// Each `network` has its own separate on-disk database.
+///
+/// To share access to the state, clone the returned [`ReadStateService`].
+pub fn init_read_only(
+    config: Config,
+    network: &Network,
+) -> (
+    ReadStateService,
+    ZebraDb,
+    tokio::sync::watch::Sender<NonFinalizedState>,
+) {
+    let finalized_state = FinalizedState::new_with_debug(
+        &config,
+        network,
+        true,
+        #[cfg(feature = "elasticsearch")]
+        false,
+        true,
+    );
+    let (non_finalized_state_sender, non_finalized_state_receiver) =
+        tokio::sync::watch::channel(NonFinalizedState::new(network));
+
+    (
+        ReadStateService::new(&finalized_state, None, non_finalized_state_receiver),
+        finalized_state.db.clone(),
+        non_finalized_state_sender,
+    )
+}
+
+/// Calls [`init_read_only`] with the provided [`Config`] and [`Network`] from a blocking task.
+/// Returns a [`tokio::task::JoinHandle`] with a read state service, a database
+/// handle, and a sender for the shared non-finalized state.
+pub fn spawn_init_read_only(
+    config: Config,
+    network: &Network,
+) -> tokio::task::JoinHandle<(
+    ReadStateService,
+    ZebraDb,
+    tokio::sync::watch::Sender<NonFinalizedState>,
+)> {
+    let network = network.clone();
+    tokio::task::spawn_blocking(move || init_read_only(config, &network))
+}
+
 /// Calls [`init`] with the provided [`Config`] and [`Network`] from a blocking task.
 /// Returns a [`tokio::task::JoinHandle`] with a boxed state service,
 /// a read state service, and receivers for state chain tip updates.
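
The new `init_read_only` and `spawn_init_read_only` initializers above return a `ReadStateService`, a `ZebraDb` handle, and a `watch` sender for the shared non-finalized state. A minimal usage sketch (not part of this diff), assuming a Tokio binary that depends on `zebra-state` and `zebra-chain` and uses the default config on Mainnet:

```rust
use zebra_chain::parameters::Network;
use zebra_state::{spawn_init_read_only, Config};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Open the state database in read-only mode from a blocking task,
    // because opening RocksDB can block the async executor.
    let (read_state, _db, _non_finalized_state_sender) =
        spawn_init_read_only(Config::default(), &Network::Mainnet).await?;

    // `ReadStateService` is designed to be cloned: every clone shares the
    // same underlying database, so it can be handed to multiple consumers.
    let _read_state_for_rpc = read_state.clone();

    Ok(())
}
```

Because the service is created with `read_only: true`, it never writes to the database; a separate process that follows the chain can publish updates through the returned non-finalized state sender.
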
diff --git a/zebra-state/src/service/chain_tip.rs b/zebra-state/src/service/chain_tip.rs
index 3f3468f4ca6..3fb016aa356 100644
--- a/zebra-state/src/service/chain_tip.rs
+++ b/zebra-state/src/service/chain_tip.rs
@@ -107,15 +107,15 @@ impl From<ContextuallyVerifiedBlock> for ChainTipBlock {
     }
 }

-impl From<CheckpointVerifiedBlock> for ChainTipBlock {
-    fn from(finalized: CheckpointVerifiedBlock) -> Self {
-        let CheckpointVerifiedBlock(SemanticallyVerifiedBlock {
+impl From<SemanticallyVerifiedBlock> for ChainTipBlock {
+    fn from(prepared: SemanticallyVerifiedBlock) -> Self {
+        let SemanticallyVerifiedBlock {
             block,
             hash,
             height,
+            new_outputs: _,
             transaction_hashes,
-            ..
-        }) = finalized;
+        } = prepared;

         Self {
             hash,
@@ -128,6 +128,12 @@ impl From<CheckpointVerifiedBlock> for ChainTipBlock {
     }
 }

+impl From<CheckpointVerifiedBlock> for ChainTipBlock {
+    fn from(CheckpointVerifiedBlock(prepared): CheckpointVerifiedBlock) -> Self {
+        prepared.into()
+    }
+}
+
 /// A sender for changes to the non-finalized and finalized chain tips.
 #[derive(Debug)]
 pub struct ChainTipSender {
diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs
index 7256d89dac2..f8c9bade5c1 100644
--- a/zebra-state/src/service/finalized_state.rs
+++ b/zebra-state/src/service/finalized_state.rs
@@ -142,14 +142,14 @@ impl FinalizedState {
     pub fn new(
         config: &Config,
         network: &Network,
-        #[cfg(feature = "elasticsearch")] elastic_db: Option<Elasticsearch>,
+        #[cfg(feature = "elasticsearch")] enable_elastic_db: bool,
     ) -> Self {
         Self::new_with_debug(
             config,
             network,
             false,
             #[cfg(feature = "elasticsearch")]
-            elastic_db,
+            enable_elastic_db,
             false,
         )
     }
@@ -162,9 +162,37 @@
         config: &Config,
         network: &Network,
         debug_skip_format_upgrades: bool,
-        #[cfg(feature = "elasticsearch")] elastic_db: Option<Elasticsearch>,
+        #[cfg(feature = "elasticsearch")] enable_elastic_db: bool,
         read_only: bool,
     ) -> Self {
+        #[cfg(feature = "elasticsearch")]
+        let elastic_db = if enable_elastic_db {
+            use elasticsearch::{
+                auth::Credentials::Basic,
+                cert::CertificateValidation,
+                http::transport::{SingleNodeConnectionPool, TransportBuilder},
+                http::Url,
+                Elasticsearch,
+            };
+
+            let conn_pool = SingleNodeConnectionPool::new(
+                Url::parse(config.elasticsearch_url.as_str())
+                    .expect("configured elasticsearch url is invalid"),
+            );
+            let transport = TransportBuilder::new(conn_pool)
+                .cert_validation(CertificateValidation::None)
+                .auth(Basic(
+                    config.clone().elasticsearch_username,
+                    config.clone().elasticsearch_password,
+                ))
+                .build()
+                .expect("elasticsearch transport builder should not fail");
+
+            Some(Elasticsearch::new(transport))
+        } else {
+            None
+        };
+
         let db = ZebraDb::new(
             config,
             STATE_DATABASE_KIND,
diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs b/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs
index 14a8dd6c2a7..eb12cf41f1b 100644
--- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs
+++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs
@@ -62,7 +62,7 @@ fn test_raw_rocksdb_column_families_with_network(network: Network) {
         &Config::ephemeral(),
         &network,
         #[cfg(feature = "elasticsearch")]
-        None,
+        false,
     );

     // Snapshot the column family names
diff --git a/zebra-state/src/service/finalized_state/tests/prop.rs b/zebra-state/src/service/finalized_state/tests/prop.rs
index d48761795f5..7ff2c3ac91b 100644
--- a/zebra-state/src/service/finalized_state/tests/prop.rs
+++ b/zebra-state/src/service/finalized_state/tests/prop.rs
@@ -24,7 +24,7 @@ fn blocks_with_v5_transactions() -> Result<()> {
         .and_then(|v| v.parse().ok())
         .unwrap_or(DEFAULT_PARTIAL_CHAIN_PROPTEST_CASES)),
     |((chain, count, network, _history_tree) in PreparedChain::default())| {
-        let mut state = FinalizedState::new(&Config::ephemeral(), &network, #[cfg(feature = "elasticsearch")] None);
+        let mut state = FinalizedState::new(&Config::ephemeral(), &network, #[cfg(feature = "elasticsearch")] false);
         let mut height = Height(0);
         // use `count` to minimize test failures, so they are easier to diagnose
         for block in chain.iter().take(count) {
@@ -65,7 +65,7 @@ fn all_upgrades_and_wrong_commitments_with_fake_activation_heights() -> Result<(
         .unwrap_or(DEFAULT_PARTIAL_CHAIN_PROPTEST_CASES)),
     |((chain, _count, network, _history_tree) in PreparedChain::default().with_valid_commitments().no_shrink())| {

-        let mut state = FinalizedState::new(&Config::ephemeral(), &network, #[cfg(feature = "elasticsearch")] None);
+        let mut state = FinalizedState::new(&Config::ephemeral(), &network, #[cfg(feature = "elasticsearch")] false);
         let mut height = Height(0);
         let heartwood_height = NetworkUpgrade::Heartwood.activation_height(&network).unwrap();
         let heartwood_height_plus1 = (heartwood_height + 1).unwrap();
diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs
index dbd0e7c1dae..c5f1ba371d5 100644
--- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs
+++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs
@@ -169,7 +169,7 @@ fn test_block_and_transaction_data_with_network(network: Network) {
         &Config::ephemeral(),
         &network,
         #[cfg(feature = "elasticsearch")]
-        None,
+        false,
     );

     // Assert that empty databases are the same, regardless of the network.
diff --git a/zebra-state/src/service/non_finalized_state/tests/prop.rs b/zebra-state/src/service/non_finalized_state/tests/prop.rs
index d4ef55344c0..68f7d77588b 100644
--- a/zebra-state/src/service/non_finalized_state/tests/prop.rs
+++ b/zebra-state/src/service/non_finalized_state/tests/prop.rs
@@ -479,7 +479,7 @@ fn rejection_restores_internal_state_genesis() -> Result<()> {
         }
     ))| {
         let mut state = NonFinalizedState::new(&network);
-        let finalized_state = FinalizedState::new(&Config::ephemeral(), &network, #[cfg(feature = "elasticsearch")] None);
+        let finalized_state = FinalizedState::new(&Config::ephemeral(), &network, #[cfg(feature = "elasticsearch")] false);

         let fake_value_pool = ValueBalance::<NonNegative>::fake_populated_pool();
         finalized_state.set_finalized_value_pool(fake_value_pool);
diff --git a/zebra-state/src/service/non_finalized_state/tests/vectors.rs b/zebra-state/src/service/non_finalized_state/tests/vectors.rs
index 6f908f080c0..b489d6f94f0 100644
--- a/zebra-state/src/service/non_finalized_state/tests/vectors.rs
+++ b/zebra-state/src/service/non_finalized_state/tests/vectors.rs
@@ -157,7 +157,7 @@ fn best_chain_wins_for_network(network: Network) -> Result<()> {
         &Config::ephemeral(),
         &network,
         #[cfg(feature = "elasticsearch")]
-        None,
+        false,
     );

     state.commit_new_chain(block2.prepare(), &finalized_state)?;
@@ -194,7 +194,7 @@ fn finalize_pops_from_best_chain_for_network(network: Network) -> Result<()> {
         &Config::ephemeral(),
         &network,
         #[cfg(feature = "elasticsearch")]
-        None,
+        false,
     );

     let fake_value_pool = ValueBalance::<NonNegative>::fake_populated_pool();
@@ -245,7 +245,7 @@ fn commit_block_extending_best_chain_doesnt_drop_worst_chains_for_network(
         &Config::ephemeral(),
         &network,
         #[cfg(feature = "elasticsearch")]
-        None,
+        false,
     );
     let fake_value_pool =
         ValueBalance::<NonNegative>::fake_populated_pool();
@@ -289,7 +289,7 @@ fn shorter_chain_can_be_best_chain_for_network(network: Network) -> Result<()> {
         &Config::ephemeral(),
         &network,
         #[cfg(feature = "elasticsearch")]
-        None,
+        false,
     );

     let fake_value_pool = ValueBalance::<NonNegative>::fake_populated_pool();
@@ -334,7 +334,7 @@ fn longer_chain_with_more_work_wins_for_network(network: Network) -> Result<()>
         &Config::ephemeral(),
         &network,
         #[cfg(feature = "elasticsearch")]
-        None,
+        false,
     );

     let fake_value_pool = ValueBalance::<NonNegative>::fake_populated_pool();
@@ -378,7 +378,7 @@ fn equal_length_goes_to_more_work_for_network(network: Network) -> Result<()> {
         &Config::ephemeral(),
         &network,
         #[cfg(feature = "elasticsearch")]
-        None,
+        false,
     );

     let fake_value_pool = ValueBalance::<NonNegative>::fake_populated_pool();
@@ -426,7 +426,7 @@ fn history_tree_is_updated_for_network_upgrade(
         &Config::ephemeral(),
         &network,
         #[cfg(feature = "elasticsearch")]
-        None,
+        false,
     );

     state
@@ -525,7 +525,7 @@ fn commitment_is_validated_for_network_upgrade(network: Network, network_upgrade
         &Config::ephemeral(),
         &network,
         #[cfg(feature = "elasticsearch")]
-        None,
+        false,
     );

     state
diff --git a/zebra-state/src/tests/setup.rs b/zebra-state/src/tests/setup.rs
index d407726330e..cc53a0d7ee5 100644
--- a/zebra-state/src/tests/setup.rs
+++ b/zebra-state/src/tests/setup.rs
@@ -101,7 +101,7 @@ pub(crate) fn new_state_with_mainnet_genesis(
         // The tests that use this setup function also commit invalid blocks to the state.
         true,
         #[cfg(feature = "elasticsearch")]
-        None,
+        false,
         false,
     );
     let non_finalized_state = NonFinalizedState::new(&network);
diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml
index a86d2afe8a2..27bb5273959 100644
--- a/zebrad/Cargo.toml
+++ b/zebrad/Cargo.toml
@@ -301,3 +301,6 @@ zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.4" }
 # https://github.com/rust-lang/cargo/issues/9096
 # zebra-utils { path = "../zebra-utils", artifact = "bin:zebra-checkpoints" }
 zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.37" }
+
+[lints.rust]
+unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs
index aa2b3f781f9..50d083d52d8 100644
--- a/zebrad/src/commands/start.rs
+++ b/zebrad/src/commands/start.rs
@@ -45,11 +45,6 @@
 //!  * Progress Task
 //!    * logs progress towards the chain tip
 //!
-//! Shielded Scanning:
-//!  * Shielded Scanner Task
-//!    * if the user has configured Zebra with their shielded viewing keys, scans new and existing
-//!      blocks for transactions that use those keys
-//!
 //! Block Mining:
 //!  * Internal Miner Task
 //!    * if the user has configured Zebra to mine blocks, spawns tasks to generate new blocks,
@@ -339,24 +334,6 @@ impl StartCmd {
             tokio::spawn(syncer.sync().in_current_span())
         };

-        #[cfg(feature = "shielded-scan")]
-        // Spawn never ending scan task only if we have keys to scan for.
-        let scan_task_handle = {
-            // TODO: log the number of keys and update the scan_task_starts() test
-            info!("spawning shielded scanner with configured viewing keys");
-            zebra_scan::spawn_init(
-                config.shielded_scan.clone(),
-                config.network.network.clone(),
-                state,
-                chain_tip_change,
-            )
-        };
-
-        #[cfg(not(feature = "shielded-scan"))]
-        // Spawn a dummy scan task which doesn't do anything and never finishes.
-        let scan_task_handle: tokio::task::JoinHandle<Result<(), Report>> =
-            tokio::spawn(std::future::pending().in_current_span());
-
         // And finally, spawn the internal Zcash miner, if it is enabled.
// // TODO: add a config to enable the miner rather than a feature. @@ -398,7 +375,6 @@ impl StartCmd { pin!(tx_gossip_task_handle); pin!(progress_task_handle); pin!(end_of_support_task_handle); - pin!(scan_task_handle); pin!(miner_task_handle); // startup tasks @@ -487,10 +463,6 @@ impl StartCmd { Ok(()) } - scan_result = &mut scan_task_handle => scan_result - .expect("unexpected panic in the scan task") - .map(|_| info!("scan task exited")), - miner_result = &mut miner_task_handle => miner_result .expect("unexpected panic in the miner task") .map(|_| info!("miner task exited")), @@ -519,7 +491,6 @@ impl StartCmd { tx_gossip_task_handle.abort(); progress_task_handle.abort(); end_of_support_task_handle.abort(); - scan_task_handle.abort(); miner_task_handle.abort(); // startup tasks diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 2c8c692d4b9..a502447386f 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -2903,195 +2903,198 @@ async fn fully_synced_rpc_z_getsubtreesbyindex_snapshot_test() -> Result<()> { Ok(()) } -/// Test that the scanner task gets started when the node starts. -#[test] -#[cfg(feature = "shielded-scan")] -fn scan_task_starts() -> Result<()> { - use indexmap::IndexMap; - use zebra_scan::tests::ZECPAGES_SAPLING_VIEWING_KEY; - - let _init_guard = zebra_test::init(); - - let test_type = TestType::LaunchWithEmptyState { - launches_lightwalletd: false, - }; - let mut config = default_test_config(&Mainnet)?; - let mut keys = IndexMap::new(); - keys.insert(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), 1); - config.shielded_scan.sapling_keys_to_scan = keys; - - // Start zebra with the config. - let mut zebrad = testdir()? - .with_exact_config(&config)? - .spawn_child(args!["start"])? - .with_timeout(test_type.zebrad_timeout()); - - // Check scanner was started. - zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; - - // Look for 2 scanner notices indicating we are below sapling activation. - zebrad.expect_stdout_line_matches("scanner is waiting for Sapling activation. Current tip: [0-9]{1,4}, Sapling activation: 419200")?; - zebrad.expect_stdout_line_matches("scanner is waiting for Sapling activation. Current tip: [0-9]{1,4}, Sapling activation: 419200")?; - - // Kill the node. - zebrad.kill(false)?; - - // Check that scan task started and the first scanning is done. - let output = zebrad.wait_with_output()?; - - // Make sure the command was killed - output.assert_was_killed()?; - output.assert_failure()?; - - Ok(()) -} - -/// Test that the scanner gRPC server starts when the node starts. -#[tokio::test] -#[cfg(all(feature = "shielded-scan", not(target_os = "windows")))] -async fn scan_rpc_server_starts() -> Result<()> { - use zebra_grpc::scanner::{scanner_client::ScannerClient, Empty}; - - let _init_guard = zebra_test::init(); - - let test_type = TestType::LaunchWithEmptyState { - launches_lightwalletd: false, - }; - - let port = random_known_port(); - let listen_addr = format!("127.0.0.1:{port}"); - let mut config = default_test_config(&Mainnet)?; - config.shielded_scan.listen_addr = Some(listen_addr.parse()?); - - // Start zebra with the config. - let mut zebrad = testdir()? - .with_exact_config(&config)? - .spawn_child(args!["start"])? - .with_timeout(test_type.zebrad_timeout()); - - // Wait until gRPC server is starting. 
- tokio::time::sleep(LAUNCH_DELAY).await; - zebrad.expect_stdout_line_matches("starting scan gRPC server")?; - tokio::time::sleep(Duration::from_secs(1)).await; - - let mut client = ScannerClient::connect(format!("http://{listen_addr}")).await?; - - let request = tonic::Request::new(Empty {}); - - client.get_info(request).await?; - - // Kill the node. - zebrad.kill(false)?; - - // Check that scan task started and the first scanning is done. - let output = zebrad.wait_with_output()?; - - // Make sure the command was killed - output.assert_was_killed()?; - output.assert_failure()?; - - Ok(()) -} - -/// Test that the scanner can continue scanning where it was left when zebrad restarts. -/// -/// Needs a cache state close to the tip. A possible way to run it locally is: -/// -/// export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" -/// cargo test scan_start_where_left --features="shielded-scan" -- --ignored --nocapture -/// -/// The test will run zebrad with a key to scan, scan the first few blocks after sapling and then stops. -/// Then it will restart zebrad and check that it resumes scanning where it was left. -/// -/// Note: This test will remove all the contents you may have in the ZEBRA_CACHED_STATE_DIR/private-scan directory -/// so it can start with an empty scanning state. -#[ignore] -#[test] -#[cfg(feature = "shielded-scan")] -fn scan_start_where_left() -> Result<()> { - use indexmap::IndexMap; - use zebra_scan::{storage::db::SCANNER_DATABASE_KIND, tests::ZECPAGES_SAPLING_VIEWING_KEY}; - - let _init_guard = zebra_test::init(); - - // use `UpdateZebraCachedStateNoRpc` as the test type to make sure a zebrad cache state is available. - let test_type = TestType::UpdateZebraCachedStateNoRpc; - if let Some(cache_dir) = test_type.zebrad_state_path("scan test") { - // Add a key to the config - let mut config = default_test_config(&Mainnet)?; - let mut keys = IndexMap::new(); - keys.insert(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), 1); - config.shielded_scan.sapling_keys_to_scan = keys; - - // Add the cache dir to shielded scan, make it the same as the zebrad cache state. - config - .shielded_scan - .db_config_mut() - .cache_dir - .clone_from(&cache_dir); - config.shielded_scan.db_config_mut().ephemeral = false; - - // Add the cache dir to state. - config.state.cache_dir.clone_from(&cache_dir); - config.state.ephemeral = false; - - // Remove the scan directory before starting. - let scan_db_path = cache_dir.join(SCANNER_DATABASE_KIND); - fs::remove_dir_all(std::path::Path::new(&scan_db_path)).ok(); - - // Start zebra with the config. - let mut zebrad = testdir()? - .with_exact_config(&config)? - .spawn_child(args!["start"])? - .with_timeout(test_type.zebrad_timeout()); - - // Check scanner was started. - zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; - - // The first time - zebrad.expect_stdout_line_matches( - r"Scanning the blockchain for key 0, started at block 419200, now at block 420000", - )?; - - // Make sure scanner scans a few blocks. - zebrad.expect_stdout_line_matches( - r"Scanning the blockchain for key 0, started at block 419200, now at block 430000", - )?; - zebrad.expect_stdout_line_matches( - r"Scanning the blockchain for key 0, started at block 419200, now at block 440000", - )?; - - // Kill the node. - zebrad.kill(false)?; - let output = zebrad.wait_with_output()?; - - // Make sure the command was killed - output.assert_was_killed()?; - output.assert_failure()?; - - // Start the node again. - let mut zebrad = testdir()? - .with_exact_config(&config)? 
- .spawn_child(args!["start"])? - .with_timeout(test_type.zebrad_timeout()); - - // Resuming message. - zebrad.expect_stdout_line_matches( - "Last scanned height for key number 0 is 439000, resuming at 439001", - )?; - zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; - - // Start scanning where it was left. - zebrad.expect_stdout_line_matches( - r"Scanning the blockchain for key 0, started at block 439001, now at block 440000", - )?; - zebrad.expect_stdout_line_matches( - r"Scanning the blockchain for key 0, started at block 439001, now at block 450000", - )?; - } - - Ok(()) -} +// TODO: Move this test once we have the new scanner binary. +// /// Test that the scanner task gets started when the node starts. +// #[test] +// #[cfg(feature = "shielded-scan")] +// fn scan_task_starts() -> Result<()> { +// use indexmap::IndexMap; +// use zebra_scan::tests::ZECPAGES_SAPLING_VIEWING_KEY; + +// let _init_guard = zebra_test::init(); + +// let test_type = TestType::LaunchWithEmptyState { +// launches_lightwalletd: false, +// }; +// let mut config = default_test_config(&Mainnet)?; +// let mut keys = IndexMap::new(); +// keys.insert(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), 1); +// config.shielded_scan.sapling_keys_to_scan = keys; + +// // Start zebra with the config. +// let mut zebrad = testdir()? +// .with_exact_config(&config)? +// .spawn_child(args!["start"])? +// .with_timeout(test_type.zebrad_timeout()); + +// // Check scanner was started. +// zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; + +// // Look for 2 scanner notices indicating we are below sapling activation. +// zebrad.expect_stdout_line_matches("scanner is waiting for Sapling activation. Current tip: [0-9]{1,4}, Sapling activation: 419200")?; +// zebrad.expect_stdout_line_matches("scanner is waiting for Sapling activation. Current tip: [0-9]{1,4}, Sapling activation: 419200")?; + +// // Kill the node. +// zebrad.kill(false)?; + +// // Check that scan task started and the first scanning is done. +// let output = zebrad.wait_with_output()?; + +// // Make sure the command was killed +// output.assert_was_killed()?; +// output.assert_failure()?; + +// Ok(()) +// } + +// TODO: Move this test once we have the new scanner binary. +// /// Test that the scanner gRPC server starts when the node starts. +// #[tokio::test] +// #[cfg(all(feature = "shielded-scan", not(target_os = "windows")))] +// async fn scan_rpc_server_starts() -> Result<()> { +// use zebra_grpc::scanner::{scanner_client::ScannerClient, Empty}; + +// let _init_guard = zebra_test::init(); + +// let test_type = TestType::LaunchWithEmptyState { +// launches_lightwalletd: false, +// }; + +// let port = random_known_port(); +// let listen_addr = format!("127.0.0.1:{port}"); +// let mut config = default_test_config(&Mainnet)?; +// config.shielded_scan.listen_addr = Some(listen_addr.parse()?); + +// // Start zebra with the config. +// let mut zebrad = testdir()? +// .with_exact_config(&config)? +// .spawn_child(args!["start"])? +// .with_timeout(test_type.zebrad_timeout()); + +// // Wait until gRPC server is starting. +// tokio::time::sleep(LAUNCH_DELAY).await; +// zebrad.expect_stdout_line_matches("starting scan gRPC server")?; +// tokio::time::sleep(Duration::from_secs(1)).await; + +// let mut client = ScannerClient::connect(format!("http://{listen_addr}")).await?; + +// let request = tonic::Request::new(Empty {}); + +// client.get_info(request).await?; + +// // Kill the node. 
+// zebrad.kill(false)?; + +// // Check that scan task started and the first scanning is done. +// let output = zebrad.wait_with_output()?; + +// // Make sure the command was killed +// output.assert_was_killed()?; +// output.assert_failure()?; + +// Ok(()) +// } + +// TODO: Move this test once we have the new scanner binary. +// /// Test that the scanner can continue scanning where it was left when zebrad restarts. +// /// +// /// Needs a cache state close to the tip. A possible way to run it locally is: +// /// +// /// export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" +// /// cargo test scan_start_where_left --features="shielded-scan" -- --ignored --nocapture +// /// +// /// The test will run zebrad with a key to scan, scan the first few blocks after sapling and then stops. +// /// Then it will restart zebrad and check that it resumes scanning where it was left. +// /// +// /// Note: This test will remove all the contents you may have in the ZEBRA_CACHED_STATE_DIR/private-scan directory +// /// so it can start with an empty scanning state. +// #[ignore] +// #[test] +// #[cfg(feature = "shielded-scan")] +// fn scan_start_where_left() -> Result<()> { +// use indexmap::IndexMap; +// use zebra_scan::{storage::db::SCANNER_DATABASE_KIND, tests::ZECPAGES_SAPLING_VIEWING_KEY}; + +// let _init_guard = zebra_test::init(); + +// // use `UpdateZebraCachedStateNoRpc` as the test type to make sure a zebrad cache state is available. +// let test_type = TestType::UpdateZebraCachedStateNoRpc; +// if let Some(cache_dir) = test_type.zebrad_state_path("scan test") { +// // Add a key to the config +// let mut config = default_test_config(&Mainnet)?; +// let mut keys = IndexMap::new(); +// keys.insert(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), 1); +// config.shielded_scan.sapling_keys_to_scan = keys; + +// // Add the cache dir to shielded scan, make it the same as the zebrad cache state. +// config +// .shielded_scan +// .db_config_mut() +// .cache_dir +// .clone_from(&cache_dir); +// config.shielded_scan.db_config_mut().ephemeral = false; + +// // Add the cache dir to state. +// config.state.cache_dir.clone_from(&cache_dir); +// config.state.ephemeral = false; + +// // Remove the scan directory before starting. +// let scan_db_path = cache_dir.join(SCANNER_DATABASE_KIND); +// fs::remove_dir_all(std::path::Path::new(&scan_db_path)).ok(); + +// // Start zebra with the config. +// let mut zebrad = testdir()? +// .with_exact_config(&config)? +// .spawn_child(args!["start"])? +// .with_timeout(test_type.zebrad_timeout()); + +// // Check scanner was started. +// zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; + +// // The first time +// zebrad.expect_stdout_line_matches( +// r"Scanning the blockchain for key 0, started at block 419200, now at block 420000", +// )?; + +// // Make sure scanner scans a few blocks. +// zebrad.expect_stdout_line_matches( +// r"Scanning the blockchain for key 0, started at block 419200, now at block 430000", +// )?; +// zebrad.expect_stdout_line_matches( +// r"Scanning the blockchain for key 0, started at block 419200, now at block 440000", +// )?; + +// // Kill the node. +// zebrad.kill(false)?; +// let output = zebrad.wait_with_output()?; + +// // Make sure the command was killed +// output.assert_was_killed()?; +// output.assert_failure()?; + +// // Start the node again. +// let mut zebrad = testdir()? +// .with_exact_config(&config)? +// .spawn_child(args!["start"])? +// .with_timeout(test_type.zebrad_timeout()); + +// // Resuming message. 
+// zebrad.expect_stdout_line_matches( +// "Last scanned height for key number 0 is 439000, resuming at 439001", +// )?; +// zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; + +// // Start scanning where it was left. +// zebrad.expect_stdout_line_matches( +// r"Scanning the blockchain for key 0, started at block 439001, now at block 440000", +// )?; +// zebrad.expect_stdout_line_matches( +// r"Scanning the blockchain for key 0, started at block 439001, now at block 450000", +// )?; +// } + +// Ok(()) +// } // TODO: Add this test to CI (#8236) /// Tests successful: