diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index fdaa0c8628f7..4fc5b97caae0 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -13,7 +13,7 @@ # - Multiple owners are supported. # - Either handle (e.g, @github_user or @github/team) or email can be used. Keep in mind, # that handles might work better because they are more recognizable on GitHub, -# eyou can use them for mentioning unlike an email. +# you can use them for mentioning unlike an email. # - The latest matching rule, if multiple, takes precedence. # CI diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index 29dc269ffd23..932a6d546c37 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -237,7 +237,7 @@ fetch_release_artifacts() { popd > /dev/null } -# Fetch the release artifacts like binary and sigantures from S3. Assumes the ENV are set: +# Fetch the release artifacts like binary and signatures from S3. Assumes the ENV are set: # - RELEASE_ID # - GITHUB_TOKEN # - REPO in the form paritytech/polkadot diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 903d7a3fcb3d..58065f369c9c 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -3,8 +3,8 @@ name: Check links on: pull_request: paths: - - "*.rs" - - "*.prdoc" + - "**.rs" + - "**.prdoc" - ".github/workflows/check-links.yml" - ".config/lychee.toml" types: [opened, synchronize, reopened, ready_for_review] diff --git a/.github/workflows/subsystem-benchmarks.yml b/.github/workflows/subsystem-benchmarks.yml new file mode 100644 index 000000000000..f0d56bf6e9d3 --- /dev/null +++ b/.github/workflows/subsystem-benchmarks.yml @@ -0,0 +1,54 @@ +# The action takes a json file as input and runs github-action-benchmark for it.
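+# A hypothetical manual dispatch from the CLI, assuming the GitHub `gh` tool is
+# installed and authenticated (the input values below are illustrative only,
+# not taken from this repo):
+#   gh workflow run subsystem-benchmarks.yml \
+#     -f benchmark-data-dir-path=availability-recovery-regression-bench \
+#     -f output-file-path=availability-recovery-regression-bench.json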
+ +on: + workflow_dispatch: + inputs: + benchmark-data-dir-path: + description: "Path to the benchmark data directory" + required: true + type: string + output-file-path: + description: "Path to the benchmark data file" + required: true + type: string + +jobs: + subsystem-benchmarks: + runs-on: ubuntu-latest + environment: subsystem-benchmarks + steps: + - name: Validate inputs + run: | + echo "${{ github.event.inputs.benchmark-data-dir-path }}" | grep -P '^[a-z\-]' + echo "${{ github.event.inputs.output-file-path }}" | grep -P '^[a-z\-]+\.json' + + - name: Checkout Sources + uses: actions/checkout@v4.1.2 + with: + fetch-depth: 0 + ref: "gh-pages" + + - name: Copy bench results + id: step_one + run: | + cp bench/gitlab/${{ github.event.inputs.output-file-path }} ${{ github.event.inputs.output-file-path }} + + - name: Switch branch + id: step_two + run: | + git checkout master -- + + - uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: ${{ secrets.POLKADOTSDK_GHPAGES_APP_ID }} + private-key: ${{ secrets.POLKADOTSDK_GHPAGES_APP_KEY }} + + - name: Store benchmark result + uses: benchmark-action/github-action-benchmark@v1 + with: + tool: "customSmallerIsBetter" + output-file-path: ${{ github.event.inputs.output-file-path }} + benchmark-data-dir-path: "bench/${{ github.event.inputs.benchmark-data-dir-path }}" + github-token: ${{ steps.app-token.outputs.token }} + auto-push: true diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 7f8796ca5124..93a6ccb9f8fb 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -147,6 +147,13 @@ default: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues +.publish-gh-pages-refs: + rules: + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - if: $CI_PIPELINE_SOURCE == "web" && $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME == "master" + # handle the specific case where benches could store incorrect bench data because of the downstream staging runs # exclude cargo-check-benches from such runs .test-refs-check-benches: diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index f8de61355725..44d66eb2f5eb 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -350,7 +350,7 @@ build-runtimes-polkavm: - .run-immediately # - .collect-artifact variables: - # this variable gets overriden by "rusty-cachier environment inject", use the value as default + # this variable gets overridden by "rusty-cachier environment inject", use the value as default CARGO_TARGET_DIR: "$CI_PROJECT_DIR/target" before_script: - mkdir -p ./artifacts/subkey diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index b73acb560f67..a37ba012a8a7 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -3,16 +3,13 @@ publish-rustdoc: stage: publish - extends: .kubernetes-env + extends: + - .kubernetes-env + - .publish-gh-pages-refs variables: CI_IMAGE: node:18 GIT_DEPTH: 100 RUSTDOCS_DEPLOY_REFS: "master" - rules: - - if: $CI_PIPELINE_SOURCE == "pipeline" - when: never - - if: $CI_PIPELINE_SOURCE == "web" && $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "master" needs: - job: build-rustdoc artifacts: true @@ -60,9 +57,76 @@ publish-rustdoc: - git commit -m "___Updated docs for ${CI_COMMIT_REF_NAME}___" || echo "___Nothing to commit___" - git push origin gh-pages --force + # artificial sleep to publish gh-pages + - sleep 300 + after_script: + - rm -rf .git/ ./* + +publish-subsystem-benchmarks: + stage: 
publish + variables: + CI_IMAGE: "paritytech/tools:latest" + extends: + - .kubernetes-env + - .publish-gh-pages-refs + needs: + - job: subsystem-regression-tests + artifacts: true + - job: publish-rustdoc + artifacts: false + script: + # setup ssh + - eval $(ssh-agent) + - ssh-add - <<< ${GITHUB_SSH_PRIV_KEY} + - mkdir ~/.ssh && touch ~/.ssh/known_hosts + - ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts + # Set git config + - rm -rf .git/config + - git config user.email "devops-team@parity.io" + - git config user.name "${GITHUB_USER}" + - git config remote.origin.url "git@github.com:/paritytech/${CI_PROJECT_NAME}.git" + - git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" + - git fetch origin gh-pages + # Push result to github + - git checkout gh-pages --force + - mkdir -p bench/gitlab/ || echo "Directory exists" + - rm -rf bench/gitlab/*.json || echo "No json files" + - cp -r charts/*.json bench/gitlab/ + - git add bench/gitlab/ + - git commit -m "Add json files with benchmark results for ${CI_COMMIT_REF_NAME}" + - git push origin gh-pages + # artificial sleep to publish gh-pages + - sleep 300 + allow_failure: true after_script: + - rm -rf .git/ ./* +trigger_workflow: + stage: deploy + extends: + - .kubernetes-env + - .publish-gh-pages-refs + needs: + - job: publish-subsystem-benchmarks + artifacts: false + - job: subsystem-regression-tests + artifacts: true + script: + - echo "Triggering workflow" + - | + for benchmark in $(ls charts/*.json); do + export benchmark_name=$(basename $benchmark) + echo "Benchmark: $benchmark_name" + export benchmark_dir=$(echo $benchmark_name | sed 's/\.json//') + curl -q -X POST \ + -H "Accept: application/vnd.github.v3+json" \ + -H "Authorization: token $GITHUB_TOKEN" \ + https://api.github.com/repos/paritytech/${CI_PROJECT_NAME}/actions/workflows/subsystem-benchmarks.yml/dispatches \ + -d '{"ref":"refs/heads/master","inputs":{"benchmark-data-dir-path":"'$benchmark_dir'","output-file-path":"'$benchmark_name'"}}' + sleep 300 + done + allow_failure: true + # note: images are used not only in zombienet but also in rococo, wococo and versi .build-push-image: image: $BUILDAH_IMAGE diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 26e55e9385f3..48c84b472b43 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -25,7 +25,6 @@ test-linux-stable: # "upgrade_version_checks_should_work" is currently failing - | time cargo nextest run \ - --filter-expr 'not deps(/polkadot-subsystem-bench/)' \ --workspace \ --locked \ --release \ @@ -70,7 +69,7 @@ test-linux-stable-runtime-benchmarks: # but still want to have debug assertions.
RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" script: - - time cargo nextest run --filter-expr 'not deps(/polkadot-subsystem-bench/)' --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet + - time cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet # can be used to run all tests # test-linux-stable-all: @@ -497,12 +496,19 @@ test-syscalls: subsystem-regression-tests: stage: test + artifacts: + name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" + when: always + expire_in: 1 day + paths: + - charts/ extends: - .docker-env - .common-refs - .run-immediately script: - - cargo test --profile=testnet -p polkadot-availability-recovery --test availability-recovery-regression-bench --features subsystem-benchmarks + - cargo bench --profile=testnet -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks + - cargo bench --profile=testnet -p polkadot-availability-distribution --bench availability-distribution-regression-bench --features subsystem-benchmarks tags: - benchmark allow_failure: true diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index 55120e66d0e5..52948e1eb719 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -1,7 +1,8 @@ .zombienet-refs: extends: .build-refs variables: - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.91" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.99" + PUSHGATEWAY_URL: "http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics" include: # substrate tests diff --git a/.gitlab/rust-features.sh b/.gitlab/rust-features.sh index c0ac192a6ec6..c3ec61ab8714 100755 --- a/.gitlab/rust-features.sh +++ b/.gitlab/rust-features.sh @@ -15,7 +15,7 @@ # # The steps of this script: # 1. Check that all required dependencies are installed. -# 2. Check that all rules are fullfilled for the whole workspace. If not: +# 2. Check that all rules are fulfilled for the whole workspace. If not: # 4. Check all crates to find the offending ones. # 5. Print all offending crates and exit with code 1.
# diff --git a/Cargo.lock b/Cargo.lock index 8a0c21f8a4d3..492739ac4e2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -191,7 +191,7 @@ checksum = "c0391754c09fab4eae3404d19d0d297aa1c670c1775ab51d8a5312afeca23157" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -206,7 +206,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", "syn-solidity", "tiny-keccak", ] @@ -275,9 +275,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.2" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c4c2c83f81532e5845a733998b6971faca23490340a418e9b72a3ec9de12ea" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "anstyle-parse" @@ -309,9 +309,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" [[package]] name = "approx" @@ -347,7 +347,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -867,6 +867,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -989,6 +990,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -1224,7 +1226,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -1235,13 +1237,13 @@ checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -1389,7 +1391,7 @@ name = "binary-merkle-tree" version = "13.0.0" dependencies = [ "array-bytes 6.1.0", - "env_logger 0.9.3", + "env_logger 0.11.3", "hash-db", "log", "sp-core", @@ -1423,7 +1425,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -1985,6 +1987,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -2121,6 +2124,7 @@ dependencies = [ "pallet-message-queue", "pallet-xcm", "parachains-common", + "parity-scale-codec", "rococo-westend-system-emulated-network", "sp-runtime", "staging-xcm", @@ -2153,6 +2157,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -2594,9 +2599,9 @@ dependencies = [ [[package]] name = "clap-num" -version = "1.1.1" +version = "1.0.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e063d263364859dc54fb064cedb7c122740cd4733644b14b176c097f51e8ab7" +checksum = "488557e97528174edaa2ee268b23a809e0c598213a4bbcb4f34575a46fda147e" dependencies = [ "num-traits", ] @@ -2645,7 +2650,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -2709,6 +2714,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -2813,10 +2819,11 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "colored" -version = "2.1.0" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" +checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6" dependencies = [ + "is-terminal", "lazy_static", "windows-sys 0.48.0", ] @@ -2962,6 +2969,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -3055,6 +3063,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -3118,6 +3127,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -3768,6 +3778,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-io", "sp-runtime", "sp-transaction-pool", ] @@ -3858,7 +3869,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -4001,7 +4012,7 @@ dependencies = [ "cumulus-primitives-core", "cumulus-primitives-proof-size-hostfunction", "cumulus-test-runtime", - "docify 0.2.7", + "docify", "frame-support", "frame-system", "log", @@ -4269,7 +4280,6 @@ dependencies = [ "frame-system-rpc-runtime-api", "futures", "jsonrpsee", - "pallet-im-online", "pallet-timestamp", "pallet-transaction-payment", "parachains-common", @@ -4359,7 +4369,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -4399,7 +4409,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -4416,7 +4426,7 @@ checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -4519,6 +4529,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive-syn-parse" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -4624,7 +4645,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -4666,52 +4687,26 @@ checksum = 
"fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "docify" -version = "0.1.16" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1b04e6ef3d21119d3eb7b032bca17f99fe041e9c072f30f32cc0e1a2b1f3c4" +checksum = "43a2f138ad521dc4a2ced1a4576148a6a610b4c5923933b062a263130a6802ce" dependencies = [ - "docify_macros 0.1.16", -] - -[[package]] -name = "docify" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc4fd38aaa9fb98ac70794c82a00360d1e165a87fbf96a8a91f9dfc602aaee2" -dependencies = [ - "docify_macros 0.2.7", + "docify_macros", ] [[package]] name = "docify_macros" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b5610df7f2acf89a1bb5d1a66ae56b1c7fcdcfe3948856fb3ace3f644d70eb7" -dependencies = [ - "common-path", - "derive-syn-parse", - "lazy_static", - "proc-macro2", - "quote", - "regex", - "syn 2.0.52", - "termcolor", - "walkdir", -] - -[[package]] -name = "docify_macros" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63fa215f3a0d40fb2a221b3aa90d8e1fbb8379785a990cb60d62ac71ebdc6460" +checksum = "1a081e51fb188742f5a7a1164ad752121abcb22874b21e2c3b0dd040c515fdad" dependencies = [ "common-path", - "derive-syn-parse", + "derive-syn-parse 0.2.0", "once_cell", "proc-macro2", "quote", "regex", - "syn 2.0.52", + "syn 2.0.53", "termcolor", "toml 0.8.8", "walkdir", @@ -4939,7 +4934,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -4950,14 +4945,14 @@ checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] -name = "env_logger" -version = "0.8.4" +name = "env_filter" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" dependencies = [ "log", "regex", @@ -4965,15 +4960,12 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.9.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" dependencies = [ - "atty", - "humantime", "log", "regex", - "termcolor", ] [[package]] @@ -4989,6 +4981,19 @@ dependencies = [ "termcolor", ] +[[package]] +name = "env_logger" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" +dependencies = [ + "anstream", + "anstyle", + "env_filter", + "humantime", + "log", +] + [[package]] name = "environmental" version = "1.1.4" @@ -5140,7 +5145,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -5398,34 +5403,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" -[[package]] -name = "frame" -version = "0.0.1-dev" -dependencies = [ - "docify 0.2.7", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-rpc-runtime-api", - "log", - 
"pallet-examples", - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-arithmetic", - "sp-block-builder", - "sp-consensus-aura", - "sp-consensus-grandpa", - "sp-core", - "sp-inherents", - "sp-io", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-transaction-pool", - "sp-version", -] - [[package]] name = "frame-benchmarking" version = "28.0.0" @@ -5524,7 +5501,7 @@ dependencies = [ "quote", "scale-info", "sp-arithmetic", - "syn 2.0.52", + "syn 2.0.53", "trybuild", ] @@ -5626,7 +5603,7 @@ dependencies = [ "array-bytes 6.1.0", "assert_matches", "bitflags 1.3.2", - "docify 0.2.7", + "docify", "environmental", "frame-metadata", "frame-support-procedural", @@ -5668,7 +5645,7 @@ version = "23.0.0" dependencies = [ "Inflector", "cfg-expr", - "derive-syn-parse", + "derive-syn-parse 0.2.0", "expander 2.0.0", "frame-support-procedural-tools", "itertools 0.10.5", @@ -5678,7 +5655,7 @@ dependencies = [ "quote", "regex", "sp-crypto-hashing", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -5689,7 +5666,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -5698,7 +5675,7 @@ version = "11.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -5757,8 +5734,8 @@ dependencies = [ name = "frame-support-test-stg-frame-crate" version = "0.1.0" dependencies = [ - "frame", "parity-scale-codec", + "polkadot-sdk-frame", "scale-info", ] @@ -5768,7 +5745,7 @@ version = "28.0.0" dependencies = [ "cfg-if", "criterion 0.4.0", - "docify 0.2.7", + "docify", "frame-support", "log", "parity-scale-codec", @@ -5927,7 +5904,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -6710,7 +6687,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.3", + "socket2 0.5.6", "widestring", "windows-sys 0.48.0", "winreg", @@ -6792,9 +6769,9 @@ checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" [[package]] name = "jsonrpsee" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a95f7cc23d5fab0cdeeaf6bad8c8f5e7a3aa7f0d211957ea78232b327ab27b0" +checksum = "87f3ae45a64cfc0882934f963be9431b2a165d667f53140358181f262aca0702" dependencies = [ "jsonrpsee-core", "jsonrpsee-http-client", @@ -6808,9 +6785,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b1736cfa3845fd9f8f43751f2b8e0e83f7b6081e754502f7d63b6587692cc83" +checksum = "455fc882e56f58228df2aee36b88a1340eafd707c76af2fa68cf94b37d461131" dependencies = [ "futures-util", "http", @@ -6829,9 +6806,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82030d038658974732103e623ba2e0abec03bbbe175b39c0a2fafbada60c5868" +checksum = "b75568f4f9696e3a47426e1985b548e1a9fcb13372a5e320372acaf04aca30d1" dependencies = [ "anyhow", "async-lock 3.3.0", @@ -6855,9 +6832,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"36a06ef0de060005fddf772d54597bb6a8b0413da47dcffd304b0306147b9678" +checksum = "9e7a95e346f55df84fb167b7e06470e196e7d5b9488a21d69c5d9732043ba7ba" dependencies = [ "async-trait", "hyper", @@ -6875,22 +6852,22 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69fc56131589f82e57805f7338b87023db4aafef813555708b159787e34ad6bc" +checksum = "30ca066e73dd70294aebc5c2675d8ffae43be944af027c857ce0d4c51785f014" dependencies = [ "heck 0.4.1", "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.53", ] [[package]] name = "jsonrpsee-server" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d85be77fe5b2a94589e3164fb780017f7aff7d646b49278c0d0346af16975c8e" +checksum = "0e29c1bd1f9bba83c864977c73404e505f74f730fa0db89dd490ec174e36d7f0" dependencies = [ "futures-util", "http", @@ -6912,9 +6889,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a48fdc1202eafc51c63e00406575e59493284ace8b8b61aa16f3a6db5d64f1a" +checksum = "3467fd35feeee179f71ab294516bdf3a81139e7aeebdd860e46897c12e1a3368" dependencies = [ "anyhow", "beef", @@ -6925,9 +6902,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5ce25d70a8e4d3cc574bbc3cad0137c326ad64b194793d5e7bbdd3fa4504181" +checksum = "68ca71e74983f624c0cb67828e480a981586074da8ad3a2f214c6a3f884edab9" dependencies = [ "http", "jsonrpsee-client-transport", @@ -7870,7 +7847,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -7880,11 +7857,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "468155613a44cfd825f1fb0ffa532b018253920d404e6fca1e8d43155198a46d" dependencies = [ "const-random", - "derive-syn-parse", + "derive-syn-parse 0.1.5", "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -7895,7 +7872,7 @@ checksum = "9ea73aa640dc01d62a590d48c0c3521ed739d53b27f919b25c3551e233481654" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -7906,7 +7883,7 @@ checksum = "ef9d79ae96aaba821963320eb2b6e34d17df1e5a83d8a1985c29cc5be59577b3" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -8064,11 +8041,11 @@ name = "minimal-template-node" version = "0.0.0" dependencies = [ "clap 4.5.3", - "frame", "futures", "futures-timer", "jsonrpsee", "minimal-template-runtime", + "polkadot-sdk-frame", "sc-basic-authorship", "sc-cli", "sc-client-api", @@ -8098,7 +8075,6 @@ dependencies = [ name = "minimal-template-runtime" version = "0.0.0" dependencies = [ - "frame", "pallet-balances", "pallet-minimal-template", "pallet-sudo", @@ -8106,6 +8082,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", + "polkadot-sdk-frame", "scale-info", "sp-genesis-builder", "substrate-wasm-builder", @@ -9141,7 +9118,7 @@ name = "pallet-bags-list" version = "27.0.0" dependencies = [ "aquamarine 0.5.0", - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -9187,7 +9164,7 @@ dependencies = [ name = 
"pallet-balances" version = "28.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -9445,7 +9422,7 @@ dependencies = [ "array-bytes 6.1.0", "assert_matches", "bitflags 1.3.2", - "env_logger 0.9.3", + "env_logger 0.11.3", "environmental", "frame-benchmarking", "frame-support", @@ -9463,6 +9440,7 @@ dependencies = [ "pallet-timestamp", "pallet-utility", "parity-scale-codec", + "paste", "pretty_assertions", "rand", "rand_pcg", @@ -9539,7 +9517,7 @@ version = "18.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -9682,7 +9660,7 @@ dependencies = [ "sp-npos-elections", "sp-runtime", "sp-tracing 16.0.0", - "strum 0.24.1", + "strum 0.26.2", ] [[package]] @@ -9737,8 +9715,8 @@ dependencies = [ name = "pallet-example-frame-crate" version = "0.0.1" dependencies = [ - "frame", "parity-scale-codec", + "polkadot-sdk-frame", "scale-info", ] @@ -9778,7 +9756,7 @@ dependencies = [ name = "pallet-example-single-block-migrations" version = "0.0.1" dependencies = [ - "docify 0.2.7", + "docify", "frame-executive", "frame-support", "frame-system", @@ -9841,7 +9819,7 @@ dependencies = [ name = "pallet-fast-unstake" version = "27.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -10032,7 +10010,7 @@ dependencies = [ name = "pallet-migrations" version = "1.0.0" dependencies = [ - "docify 0.1.16", + "docify", "frame-benchmarking", "frame-executive", "frame-support", @@ -10055,8 +10033,8 @@ dependencies = [ name = "pallet-minimal-template" version = "0.0.0" dependencies = [ - "frame", "parity-scale-codec", + "polkadot-sdk-frame", "scale-info", ] @@ -10083,7 +10061,7 @@ name = "pallet-mmr" version = "27.0.0" dependencies = [ "array-bytes 6.1.0", - "env_logger 0.9.3", + "env_logger 0.11.3", "frame-benchmarking", "frame-support", "frame-system", @@ -10321,7 +10299,7 @@ dependencies = [ name = "pallet-paged-list" version = "0.6.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10362,7 +10340,7 @@ dependencies = [ name = "pallet-parameters" version = "0.0.1" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10514,7 +10492,7 @@ dependencies = [ name = "pallet-safe-mode" version = "9.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10568,7 +10546,7 @@ dependencies = [ name = "pallet-scheduler" version = "29.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10707,7 +10685,7 @@ dependencies = [ "proc-macro2", "quote", "sp-runtime", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -10772,7 +10750,7 @@ dependencies = [ name = "pallet-sudo" version = "28.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10801,7 +10779,7 @@ dependencies = [ name = "pallet-timestamp" version = "27.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10901,7 +10879,7 @@ dependencies = [ name = "pallet-treasury" version = "27.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10920,7 +10898,7 @@ dependencies = [ name = "pallet-tx-pause" version = "9.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ 
-11023,6 +11001,7 @@ dependencies = [ "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -11593,6 +11572,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "enumflags2", "frame-benchmarking", @@ -11691,6 +11671,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "enumflags2", "frame-benchmarking", @@ -11779,7 +11760,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -11820,7 +11801,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -11918,7 +11899,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "itertools 0.10.5", @@ -11948,7 +11929,7 @@ dependencies = [ "always-assert", "assert_matches", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "log", @@ -12004,7 +11985,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "async-trait", - "env_logger 0.9.3", + "env_logger 0.11.3", "fatality", "futures", "futures-timer", @@ -12066,7 +12047,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "fatality", "futures", "futures-timer", @@ -12221,6 +12202,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", "sp-core", "sp-keyring", "sp-maybe-compressed-blob", @@ -12236,7 +12218,7 @@ dependencies = [ "async-trait", "bitvec", "derive_more", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "itertools 0.10.5", @@ -12278,7 +12260,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "kvdb", @@ -12720,7 +12702,7 @@ dependencies = [ "rand_chacha 0.3.1", "sc-authority-discovery", "sc-network", - "strum 0.24.1", + "strum 0.26.2", "thiserror", "tracing-gum", ] @@ -12812,7 +12794,7 @@ dependencies = [ "assert_matches", "async-trait", "derive_more", - "env_logger 0.9.3", + "env_logger 0.11.3", "fatality", "futures", "futures-channel", @@ -13185,8 +13167,7 @@ version = "0.0.1" dependencies = [ "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", - "docify 0.2.7", - "frame", + "docify", "frame-executive", "frame-support", "frame-system", @@ -13194,7 +13175,9 @@ dependencies = [ "pallet-assets", "pallet-aura", "pallet-authorship", + "pallet-babe", "pallet-balances", + "pallet-broker", "pallet-collective", "pallet-default-config-example", "pallet-democracy", @@ -13202,12 +13185,17 @@ dependencies = [ "pallet-example-single-block-migrations", "pallet-examples", "pallet-multisig", + "pallet-nfts", + "pallet-preimage", "pallet-proxy", + "pallet-referenda", "pallet-scheduler", "pallet-timestamp", "pallet-transaction-payment", + "pallet-uniques", "pallet-utility", "parity-scale-codec", + "polkadot-sdk-frame", "sc-cli", "sc-client-db", "sc-consensus-aura", @@ -13222,6 +13210,7 @@ dependencies = [ "scale-info", "simple-mermaid", "sp-api", + "sp-arithmetic", "sp-core", "sp-io", "sp-keyring", @@ -13236,6 +13225,35 @@ dependencies = [ "substrate-wasm-builder", ] 
+[[package]] +name = "polkadot-sdk-frame" +version = "0.1.0" +dependencies = [ + "docify", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-rpc-runtime-api", + "log", + "pallet-examples", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-arithmetic", + "sp-block-builder", + "sp-consensus-aura", + "sp-consensus-grandpa", + "sp-core", + "sp-inherents", + "sp-io", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 14.0.0", + "sp-transaction-pool", + "sp-version", +] + [[package]] name = "polkadot-service" version = "7.0.0" @@ -13243,7 +13261,7 @@ dependencies = [ "assert_matches", "async-trait", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "frame-benchmarking", "frame-benchmarking-cli", "frame-support", @@ -13257,7 +13275,6 @@ dependencies = [ "log", "mmr-gadget", "pallet-babe", - "pallet-im-online", "pallet-staking", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", @@ -13354,12 +13371,14 @@ dependencies = [ "sp-transaction-pool", "sp-version", "sp-weights", + "staging-xcm", "substrate-prometheus-endpoint", "tempfile", "thiserror", "tracing-gum", "westend-runtime", "westend-runtime-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -13418,7 +13437,7 @@ dependencies = [ "clap-num", "color-eyre", "colored", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "hex", @@ -13458,6 +13477,7 @@ dependencies = [ "sc-service", "schnorrkel 0.11.4", "serde", + "serde_json", "serde_yaml", "sha1", "sp-application-crypto", @@ -13709,7 +13729,7 @@ dependencies = [ "polkavm-common", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -13719,7 +13739,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ "polkavm-derive-impl", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -13915,7 +13935,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -14006,7 +14026,7 @@ checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -14078,7 +14098,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -14178,7 +14198,7 @@ dependencies = [ "itertools 0.11.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -14521,7 +14541,7 @@ checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -14786,6 +14806,7 @@ dependencies = [ "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -14855,7 +14876,6 @@ dependencies = [ "pallet-elections-phragmen", "pallet-grandpa", "pallet-identity", - "pallet-im-online", "pallet-indices", "pallet-membership", "pallet-message-queue", @@ -14925,6 +14945,7 @@ dependencies = [ "substrate-wasm-builder", "tiny-keccak", "tokio", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -15010,7 +15031,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.0", - "syn 2.0.52", + "syn 2.0.53", 
"unicode-ident", ] @@ -15365,6 +15386,7 @@ dependencies = [ "futures-timer", "ip_network", "libp2p", + "linked_hash_set", "log", "multihash 0.18.1", "multihash-codetable", @@ -15433,7 +15455,7 @@ name = "sc-chain-spec" version = "27.0.0" dependencies = [ "array-bytes 6.1.0", - "docify 0.2.7", + "docify", "log", "memmap2 0.9.3", "parity-scale-codec", @@ -15464,7 +15486,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -15946,7 +15968,7 @@ dependencies = [ "array-bytes 6.1.0", "assert_matches", "criterion 0.4.0", - "env_logger 0.9.3", + "env_logger 0.11.3", "num_cpus", "parity-scale-codec", "parking_lot 0.12.1", @@ -16382,7 +16404,7 @@ name = "sc-rpc" version = "29.0.0" dependencies = [ "assert_matches", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "jsonrpsee", "log", @@ -16623,7 +16645,7 @@ dependencies = [ name = "sc-statement-store" version = "10.0.0" dependencies = [ - "env_logger 0.9.3", + "env_logger 0.11.3", "log", "parity-db", "parking_lot 0.12.1", @@ -16744,7 +16766,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -16814,9 +16836,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.10.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7d66a1128282b7ef025a8ead62a4a9fcf017382ec53b8ffbf4d7bf77bd3c60" +checksum = "788745a868b0e751750388f4e6546eb921ef714a4317fa6954f7cde114eb2eb7" dependencies = [ "bitvec", "cfg-if", @@ -16828,9 +16850,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.10.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" +checksum = "7dc2f4e8bc344b9fc3d5f74f72c2e55bfc38d28dc2ebc69c194a3df424e4d9ac" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", @@ -17145,7 +17167,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -17202,9 +17224,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.32" +version = "0.9.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" +checksum = "a0623d197252096520c6f2a5e1171ee436e5af99a5d7caa2891e55e61950e6d9" dependencies = [ "indexmap 2.2.3", "itoa", @@ -17245,7 +17267,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -17699,7 +17721,7 @@ name = "snowbridge-outbound-queue-merkle-tree" version = "0.3.0" dependencies = [ "array-bytes 4.2.0", - "env_logger 0.9.3", + "env_logger 0.11.3", "hex", "hex-literal", "parity-scale-codec", @@ -17998,12 +18020,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.3" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -18135,7 +18157,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -18188,6 +18210,7 @@ name = "sp-arithmetic" version = 
"23.0.0" dependencies = [ "criterion 0.4.0", + "docify", "integer-sqrt", "num-traits", "parity-scale-codec", @@ -18196,6 +18219,7 @@ dependencies = [ "scale-info", "serde", "sp-crypto-hashing", + "sp-std 14.0.0", "static_assertions", ] @@ -18329,7 +18353,7 @@ dependencies = [ "sp-keystore", "sp-mmr-primitives", "sp-runtime", - "strum 0.24.1", + "strum 0.26.2", "w3f-bls", ] @@ -18516,7 +18540,7 @@ version = "0.0.0" dependencies = [ "quote", "sp-crypto-hashing", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -18534,7 +18558,7 @@ source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf5 dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -18543,7 +18567,7 @@ version = "14.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -18618,7 +18642,7 @@ version = "31.0.0" dependencies = [ "sp-core", "sp-runtime", - "strum 0.24.1", + "strum 0.26.2", ] [[package]] @@ -18734,7 +18758,7 @@ dependencies = [ name = "sp-runtime" version = "31.0.1" dependencies = [ - "docify 0.2.7", + "docify", "either", "hash256-std-hasher", "impl-trait-for-tuples", @@ -18808,7 +18832,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -18820,7 +18844,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -19076,7 +19100,7 @@ dependencies = [ "proc-macro2", "quote", "sp-version", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -19493,6 +19517,15 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +[[package]] +name = "strum" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" +dependencies = [ + "strum_macros 0.26.2", +] + [[package]] name = "strum_macros" version = "0.24.3" @@ -19516,7 +19549,20 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.52", + "syn 2.0.53", +] + +[[package]] +name = "strum_macros" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.53", ] [[package]] @@ -19784,7 +19830,7 @@ dependencies = [ "parity-wasm", "polkavm-linker", "sp-maybe-compressed-blob", - "strum 0.24.1", + "strum 0.26.2", "tempfile", "toml 0.8.8", "walkdir", @@ -19913,9 +19959,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.52" +version = "2.0.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" +checksum = "7383cd0e49fff4b6b90ca5670bfd3e9d6a733b3f90c686605aa7eec8c4996032" dependencies = [ "proc-macro2", "quote", @@ -19931,7 +19977,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -20194,7 +20240,7 @@ checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -20330,9 +20376,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.33.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -20342,20 +20388,20 @@ dependencies = [ "parking_lot 0.12.1", "pin-project-lite 0.2.12", "signal-hook-registry", - "socket2 0.5.3", + "socket2 0.5.6", "tokio-macros", "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -20562,7 +20608,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -20604,7 +20650,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -20959,9 +21005,9 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" [[package]] name = "unsigned-varint" @@ -21169,7 +21215,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", "wasm-bindgen-shared", ] @@ -21203,7 +21249,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -21664,7 +21710,6 @@ dependencies = [ "pallet-fast-unstake", "pallet-grandpa", "pallet-identity", - "pallet-im-online", "pallet-indices", "pallet-membership", "pallet-message-queue", @@ -21738,6 +21783,7 @@ dependencies = [ "tiny-keccak", "tokio", "westend-runtime-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -22210,6 +22256,20 @@ dependencies = [ "staging-xcm-executor", ] +[[package]] +name = "xcm-fee-payment-runtime-api" +version = "0.1.0" +dependencies = [ + "frame-support", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-runtime", + "sp-std 14.0.0", + "sp-weights", + "staging-xcm", +] + [[package]] name = "xcm-procedural" version = "7.0.0" @@ -22218,7 +22278,7 @@ dependencies = [ "proc-macro2", "quote", "staging-xcm", - "syn 2.0.52", + "syn 2.0.53", "trybuild", ] @@ -22337,7 +22397,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -22357,7 +22417,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 7f62d8657b06..c10e6d6ef80b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,24 +10,24 @@ resolver = "2" members = [ "bridges/bin/runtime-common", + "bridges/chains/chain-asset-hub-rococo", + "bridges/chains/chain-asset-hub-westend", + "bridges/chains/chain-bridge-hub-cumulus", + "bridges/chains/chain-bridge-hub-kusama", + "bridges/chains/chain-bridge-hub-polkadot", + "bridges/chains/chain-bridge-hub-rococo", + "bridges/chains/chain-bridge-hub-westend", + "bridges/chains/chain-kusama", + 
"bridges/chains/chain-polkadot", + "bridges/chains/chain-polkadot-bulletin", + "bridges/chains/chain-rococo", + "bridges/chains/chain-westend", "bridges/modules/grandpa", "bridges/modules/messages", "bridges/modules/parachains", "bridges/modules/relayers", "bridges/modules/xcm-bridge-hub", "bridges/modules/xcm-bridge-hub-router", - "bridges/primitives/chain-asset-hub-rococo", - "bridges/primitives/chain-asset-hub-westend", - "bridges/primitives/chain-bridge-hub-cumulus", - "bridges/primitives/chain-bridge-hub-kusama", - "bridges/primitives/chain-bridge-hub-polkadot", - "bridges/primitives/chain-bridge-hub-rococo", - "bridges/primitives/chain-bridge-hub-westend", - "bridges/primitives/chain-kusama", - "bridges/primitives/chain-polkadot", - "bridges/primitives/chain-polkadot-bulletin", - "bridges/primitives/chain-rococo", - "bridges/primitives/chain-westend", "bridges/primitives/header-chain", "bridges/primitives/messages", "bridges/primitives/parachains", @@ -214,6 +214,7 @@ members = [ "polkadot/xcm/xcm-builder", "polkadot/xcm/xcm-executor", "polkadot/xcm/xcm-executor/integration-tests", + "polkadot/xcm/xcm-fee-payment-runtime-api", "polkadot/xcm/xcm-simulator", "polkadot/xcm/xcm-simulator/example", "polkadot/xcm/xcm-simulator/fuzzer", @@ -553,7 +554,7 @@ serde-big-array = { version = "0.3.2" } serde_derive = { version = "1.0.117" } serde_json = { version = "1.0.114", default-features = false } serde_yaml = { version = "0.9" } -syn = { version = "2.0.52" } +syn = { version = "2.0.53" } thiserror = { version = "1.0.48" } [profile.release] diff --git a/bridges/README.md b/bridges/README.md index a2ce213d2541..8bfa39841f51 100644 --- a/bridges/README.md +++ b/bridges/README.md @@ -38,10 +38,10 @@ cargo test --all ``` Also you can build the repo with [Parity CI Docker -image](https://github.com/paritytech/scripts/tree/master/dockerfiles/bridges-ci): +image](https://github.com/paritytech/scripts/tree/master/dockerfiles/ci-unified): ```bash -docker pull paritytech/bridges-ci:production +docker pull paritytech/ci-unified:latest mkdir ~/cache chown 1000:1000 ~/cache #processes in the container runs as "nonroot" user with UID 1000 docker run --rm -it -w /shellhere/parity-bridges-common \ @@ -49,7 +49,7 @@ docker run --rm -it -w /shellhere/parity-bridges-common \ -v "$(pwd)":/shellhere/parity-bridges-common \ -e CARGO_HOME=/cache/cargo/ \ -e SCCACHE_DIR=/cache/sccache/ \ - -e CARGO_TARGET_DIR=/cache/target/ paritytech/bridges-ci:production cargo build --all + -e CARGO_TARGET_DIR=/cache/target/ paritytech/ci-unified:latest cargo build --all #artifacts can be found in ~/cache/target ``` diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index ce38488a77af..f4834f5b3da6 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -11,10 +11,10 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } hash-db = { version = "0.16.0", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } static_assertions = { version = "1.1", optional = true } # Bridge dependencies diff --git 
a/bridges/bin/runtime-common/src/messages_api.rs b/bridges/bin/runtime-common/src/messages_api.rs index d160bc9f5f45..0dfb0d9458f1 100644 --- a/bridges/bin/runtime-common/src/messages_api.rs +++ b/bridges/bin/runtime-common/src/messages_api.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Helpers for implementing various message-related runtime API mthods. +//! Helpers for implementing various message-related runtime API methods. #[cfg(not(feature = "std"))] use alloc::vec::Vec; diff --git a/bridges/bin/runtime-common/src/messages_xcm_extension.rs b/bridges/bin/runtime-common/src/messages_xcm_extension.rs index fbe2acfdf898..8e161ef254eb 100644 --- a/bridges/bin/runtime-common/src/messages_xcm_extension.rs +++ b/bridges/bin/runtime-common/src/messages_xcm_extension.rs @@ -248,7 +248,7 @@ impl LocalXcmQueueManager { sender_and_lane: &SenderAndLane, enqueued_messages: MessageNonce, ) { - // skip if we dont want to handle congestion + // skip if we don't want to handle congestion if !H::supports_congestion_detection() { return } diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index deee4524e858..8c4cb2233e17 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -379,7 +379,7 @@ impl Chain for BridgedUnderlyingChain { impl ChainWithGrandpa for BridgedUnderlyingChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = 16; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8; const MAX_MANDATORY_HEADER_SIZE: u32 = 256; const AVERAGE_HEADER_SIZE: u32 = 64; } diff --git a/bridges/bin/runtime-common/src/priority_calculator.rs b/bridges/bin/runtime-common/src/priority_calculator.rs index a597fb9e2f49..5035553f508d 100644 --- a/bridges/bin/runtime-common/src/priority_calculator.rs +++ b/bridges/bin/runtime-common/src/priority_calculator.rs @@ -128,7 +128,7 @@ mod integrity_tests { Runtime::RuntimeCall: Dispatchable, BalanceOf: Send + Sync + FixedPointOperand, { - // esimate priority of transaction that delivers one message and has large tip + // estimate priority of transaction that delivers one message and has large tip let maximal_messages_in_delivery_transaction = Runtime::MaxUnconfirmedMessagesAtInboundLane::get(); let small_with_tip_priority = @@ -163,7 +163,7 @@ mod integrity_tests { { // just an estimation of extra transaction bytes that are added to every transaction // (including signature, signed extensions extra and etc + in our case it includes - // all call arguments extept the proof itself) + // all call arguments except the proof itself) let base_tx_size = 512; // let's say we are relaying similar small messages and for every message we add more trie // nodes to the proof (x0.5 because we expect some nodes to be reused) diff --git a/bridges/bin/runtime-common/src/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/refund_relayer_extension.rs index f5fdfa07d24a..030e7d60e8e1 100644 --- a/bridges/bin/runtime-common/src/refund_relayer_extension.rs +++ b/bridges/bin/runtime-common/src/refund_relayer_extension.rs @@ -16,7 +16,7 @@ //! Signed extension that refunds relayer if he has delivered some new messages. //! It also refunds transaction cost if the transaction is an `utility.batchAll()` -//! with calls that are: delivering new messsage and all necessary underlying headers +//! 
with calls that are: delivering new message and all necessary underlying headers //! (parachain or relay chain). use crate::messages_call_ext::{ @@ -1540,7 +1540,7 @@ mod tests { } #[test] - fn validate_boosts_priority_of_message_delivery_transactons() { + fn validate_boosts_priority_of_message_delivery_transactions() { run_test(|| { initialize_environment(100, 100, 100); @@ -1570,7 +1570,7 @@ mod tests { } #[test] - fn validate_does_not_boost_priority_of_message_delivery_transactons_with_too_many_messages() { + fn validate_does_not_boost_priority_of_message_delivery_transactions_with_too_many_messages() { run_test(|| { initialize_environment(100, 100, 100); diff --git a/bridges/primitives/chain-asset-hub-rococo/Cargo.toml b/bridges/chains/chain-asset-hub-rococo/Cargo.toml similarity index 69% rename from bridges/primitives/chain-asset-hub-rococo/Cargo.toml rename to bridges/chains/chain-asset-hub-rococo/Cargo.toml index 4dfa149e0ea9..9a6419a5b405 100644 --- a/bridges/primitives/chain-asset-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-asset-hub-rococo/Cargo.toml @@ -5,19 +5,20 @@ version = "0.4.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate Dependencies frame-support = { path = "../../../substrate/frame/support", default-features = false } # Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } +bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } [features] default = ["std"] diff --git a/bridges/primitives/chain-asset-hub-rococo/src/lib.rs b/bridges/chains/chain-asset-hub-rococo/src/lib.rs similarity index 100% rename from bridges/primitives/chain-asset-hub-rococo/src/lib.rs rename to bridges/chains/chain-asset-hub-rococo/src/lib.rs diff --git a/bridges/primitives/chain-asset-hub-westend/Cargo.toml b/bridges/chains/chain-asset-hub-westend/Cargo.toml similarity index 69% rename from bridges/primitives/chain-asset-hub-westend/Cargo.toml rename to bridges/chains/chain-asset-hub-westend/Cargo.toml index c9bd437562b8..1c08ee28e417 100644 --- a/bridges/primitives/chain-asset-hub-westend/Cargo.toml +++ b/bridges/chains/chain-asset-hub-westend/Cargo.toml @@ -5,19 +5,20 @@ version = "0.3.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate Dependencies frame-support = { path = "../../../substrate/frame/support", default-features = false } # Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } +bp-xcm-bridge-hub-router = { path = 
"../../primitives/xcm-bridge-hub-router", default-features = false } [features] default = ["std"] diff --git a/bridges/primitives/chain-asset-hub-westend/src/lib.rs b/bridges/chains/chain-asset-hub-westend/src/lib.rs similarity index 100% rename from bridges/primitives/chain-asset-hub-westend/src/lib.rs rename to bridges/chains/chain-asset-hub-westend/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml similarity index 76% rename from bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml rename to bridges/chains/chain-bridge-hub-cumulus/Cargo.toml index ea7401201aff..db68300aefc3 100644 --- a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml @@ -5,6 +5,7 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -12,9 +13,9 @@ workspace = true [dependencies] # Bridge Dependencies -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-messages = { path = "../messages", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs b/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs rename to bridges/chains/chain-bridge-hub-cumulus/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml similarity index 82% rename from bridges/primitives/chain-bridge-hub-kusama/Cargo.toml rename to bridges/chains/chain-bridge-hub-kusama/Cargo.toml index 35aafea91a6a..7ab4a2b18884 100644 --- a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml @@ -5,6 +5,7 @@ version = "0.6.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -13,8 +14,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-kusama/src/lib.rs b/bridges/chains/chain-bridge-hub-kusama/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-kusama/src/lib.rs rename to bridges/chains/chain-bridge-hub-kusama/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml similarity index 82% rename from bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml rename to bridges/chains/chain-bridge-hub-polkadot/Cargo.toml index 771b160f190d..8d3505447ce6 100644 --- a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml +++ 
b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml @@ -5,6 +5,7 @@ version = "0.6.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -14,8 +15,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-polkadot/src/lib.rs b/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-polkadot/src/lib.rs rename to bridges/chains/chain-bridge-hub-polkadot/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-rococo/Cargo.toml b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml similarity index 82% rename from bridges/primitives/chain-bridge-hub-rococo/Cargo.toml rename to bridges/chains/chain-bridge-hub-rococo/Cargo.toml index 73d30b61a1b8..c9676327c279 100644 --- a/bridges/primitives/chain-bridge-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml @@ -5,6 +5,7 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -13,8 +14,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-rococo/src/lib.rs rename to bridges/chains/chain-bridge-hub-rococo/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-westend/Cargo.toml b/bridges/chains/chain-bridge-hub-westend/Cargo.toml similarity index 82% rename from bridges/primitives/chain-bridge-hub-westend/Cargo.toml rename to bridges/chains/chain-bridge-hub-westend/Cargo.toml index b0c07c18a341..6bc9804f3ec1 100644 --- a/bridges/primitives/chain-bridge-hub-westend/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-westend/Cargo.toml @@ -5,6 +5,7 @@ version = "0.3.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -14,8 +15,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-westend/src/lib.rs 
b/bridges/chains/chain-bridge-hub-westend/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-westend/src/lib.rs rename to bridges/chains/chain-bridge-hub-westend/src/lib.rs diff --git a/bridges/primitives/chain-kusama/Cargo.toml b/bridges/chains/chain-kusama/Cargo.toml similarity index 68% rename from bridges/primitives/chain-kusama/Cargo.toml rename to bridges/chains/chain-kusama/Cargo.toml index 63859214d1b0..868631251670 100644 --- a/bridges/primitives/chain-kusama/Cargo.toml +++ b/bridges/chains/chain-kusama/Cargo.toml @@ -5,6 +5,7 @@ version = "0.5.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -13,9 +14,9 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-kusama/src/lib.rs b/bridges/chains/chain-kusama/src/lib.rs similarity index 95% rename from bridges/primitives/chain-kusama/src/lib.rs rename to bridges/chains/chain-kusama/src/lib.rs index 7dd4a1c7362b..77464cf4c3ed 100644 --- a/bridges/primitives/chain-kusama/src/lib.rs +++ b/bridges/chains/chain-kusama/src/lib.rs @@ -55,8 +55,8 @@ impl Chain for Kusama { impl ChainWithGrandpa for Kusama { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_KUSAMA_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } diff --git a/bridges/primitives/chain-polkadot-bulletin/Cargo.toml b/bridges/chains/chain-polkadot-bulletin/Cargo.toml similarity index 66% rename from bridges/primitives/chain-polkadot-bulletin/Cargo.toml rename to bridges/chains/chain-polkadot-bulletin/Cargo.toml index 598e856be0ac..07143ad8789c 100644 --- a/bridges/primitives/chain-polkadot-bulletin/Cargo.toml +++ b/bridges/chains/chain-polkadot-bulletin/Cargo.toml @@ -5,20 +5,21 @@ version = "0.4.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-messages = { path = "../messages", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = 
false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-polkadot-bulletin/src/lib.rs b/bridges/chains/chain-polkadot-bulletin/src/lib.rs similarity index 96% rename from bridges/primitives/chain-polkadot-bulletin/src/lib.rs rename to bridges/chains/chain-polkadot-bulletin/src/lib.rs index 8ade729c2839..b4c7f49e252f 100644 --- a/bridges/primitives/chain-polkadot-bulletin/src/lib.rs +++ b/bridges/chains/chain-polkadot-bulletin/src/lib.rs @@ -45,7 +45,7 @@ use sp_runtime::{traits::DispatchInfoOf, transaction_validity::TransactionValidi pub use bp_polkadot_core::{ AccountAddress, AccountId, Balance, Block, BlockNumber, Hash, Hasher, Header, Nonce, Signature, SignedBlock, UncheckedExtrinsic, AVERAGE_HEADER_SIZE, EXTRA_STORAGE_PROOF_SIZE, - MAX_MANDATORY_HEADER_SIZE, REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY, + MAX_MANDATORY_HEADER_SIZE, REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY, }; /// Maximal number of GRANDPA authorities at Polkadot Bulletin chain. @@ -64,7 +64,7 @@ const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(90); // Re following constants - we are using the same values at Cumulus parachains. They are limited // by the maximal transaction weight/size. Since block limits at Bulletin Chain are larger than -// at the Cumulus Bridgeg Hubs, we could reuse the same values. +// at the Cumulus Bridge Hubs, we could reuse the same values. /// Maximal number of unrewarded relayer entries at inbound lane for Cumulus-based parachains. 
pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024; @@ -209,8 +209,8 @@ impl Chain for PolkadotBulletin { impl ChainWithGrandpa for PolkadotBulletin { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_POLKADOT_BULLETIN_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } diff --git a/bridges/primitives/chain-polkadot/Cargo.toml b/bridges/chains/chain-polkadot/Cargo.toml similarity index 68% rename from bridges/primitives/chain-polkadot/Cargo.toml rename to bridges/chains/chain-polkadot/Cargo.toml index b38f3019c494..8fd320c25918 100644 --- a/bridges/primitives/chain-polkadot/Cargo.toml +++ b/bridges/chains/chain-polkadot/Cargo.toml @@ -5,6 +5,7 @@ version = "0.5.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -13,9 +14,9 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-polkadot/src/lib.rs b/bridges/chains/chain-polkadot/src/lib.rs similarity index 96% rename from bridges/primitives/chain-polkadot/src/lib.rs rename to bridges/chains/chain-polkadot/src/lib.rs index 88cf72667215..0c1f18cb091c 100644 --- a/bridges/primitives/chain-polkadot/src/lib.rs +++ b/bridges/chains/chain-polkadot/src/lib.rs @@ -57,8 +57,8 @@ impl Chain for Polkadot { impl ChainWithGrandpa for Polkadot { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_POLKADOT_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } diff --git a/bridges/primitives/chain-rococo/Cargo.toml b/bridges/chains/chain-rococo/Cargo.toml similarity index 68% rename from bridges/primitives/chain-rococo/Cargo.toml rename to bridges/chains/chain-rococo/Cargo.toml index 6d79ef8606d3..9141912bd30e 100644 --- a/bridges/primitives/chain-rococo/Cargo.toml +++ b/bridges/chains/chain-rococo/Cargo.toml @@ -5,6 +5,7 @@ version = "0.6.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -13,9 +14,9 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } 
+bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-rococo/src/lib.rs b/bridges/chains/chain-rococo/src/lib.rs similarity index 95% rename from bridges/primitives/chain-rococo/src/lib.rs rename to bridges/chains/chain-rococo/src/lib.rs index 01cc06a1d6b5..857e8fe5dcd8 100644 --- a/bridges/primitives/chain-rococo/src/lib.rs +++ b/bridges/chains/chain-rococo/src/lib.rs @@ -55,8 +55,8 @@ impl Chain for Rococo { impl ChainWithGrandpa for Rococo { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_ROCOCO_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } diff --git a/bridges/primitives/chain-westend/Cargo.toml b/bridges/chains/chain-westend/Cargo.toml similarity index 68% rename from bridges/primitives/chain-westend/Cargo.toml rename to bridges/chains/chain-westend/Cargo.toml index 731e388b929c..af666a94632d 100644 --- a/bridges/primitives/chain-westend/Cargo.toml +++ b/bridges/chains/chain-westend/Cargo.toml @@ -5,6 +5,7 @@ version = "0.3.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -13,9 +14,9 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-westend/src/lib.rs b/bridges/chains/chain-westend/src/lib.rs similarity index 95% rename from bridges/primitives/chain-westend/src/lib.rs rename to bridges/chains/chain-westend/src/lib.rs index 1b98ff775693..222d548eb7e0 100644 --- a/bridges/primitives/chain-westend/src/lib.rs +++ b/bridges/chains/chain-westend/src/lib.rs @@ -55,8 +55,8 @@ impl Chain for Westend { impl ChainWithGrandpa for Westend { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_WESTEND_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } diff --git a/bridges/docs/bridge-relayers-claim-rewards.png b/bridges/docs/bridge-relayers-claim-rewards.png new file mode 100644 index 000000000000..d56b8dd871e8 Binary files /dev/null and b/bridges/docs/bridge-relayers-claim-rewards.png differ diff --git a/bridges/docs/bridge-relayers-deregister.png 
b/bridges/docs/bridge-relayers-deregister.png
new file mode 100644
index 000000000000..e7706cee7891
Binary files /dev/null and b/bridges/docs/bridge-relayers-deregister.png differ
diff --git a/bridges/docs/bridge-relayers-register.png b/bridges/docs/bridge-relayers-register.png
new file mode 100644
index 000000000000..e9e3e1b5ac87
Binary files /dev/null and b/bridges/docs/bridge-relayers-register.png differ
diff --git a/bridges/docs/running-relayer.md b/bridges/docs/running-relayer.md
new file mode 100644
index 000000000000..710810a476e4
--- /dev/null
+++ b/bridges/docs/running-relayer.md
@@ -0,0 +1,343 @@
+# Running your own bridge relayer
+
+:warning: :construction: Please read the [Disclaimer](#disclaimer) section first :construction: :warning:
+
+## Disclaimer
+
+There are several things you should know before running your own relayer:
+
+- the initial bridge version (we call it bridges v1) supports any number of relayers, but **there's no guaranteed
+compensation** for running a relayer and/or submitting valid bridge transactions. Most probably you'll end up
+spending more funds than you get from rewards - please accept this fact;
+
+- even if your relayer has managed to submit a valid bridge transaction that has been included into the bridge
+hub block, there's no guarantee that you will be able to claim your compensation for that transaction. That's
+because compensations are paid from an account controlled by relay chain governance, and it could have no funds
+to compensate your useful actions. We'll be working on a proper process to resupply it on time, but we can't
+provide any guarantee until that process is well established.
+
+## A Brief Introduction into Relayers and our Compensations Scheme
+
+Omitting details, a relayer is an offchain process that is connected to both bridged chains. It looks at the
+outbound bridge messages queue and submits message delivery transactions to the target chain. There's a lot
+of detail behind that simple phrase - you can find more info in the
+[High-Level Bridge Overview](./high-level-overview.md) document.
+
+The reward that is paid to a relayer has two parts. The first part is static and is controlled by the governance.
+It is rather small initially - e.g. you need to deliver `10_000` Kusama -> Polkadot messages to gain a single
+KSM token.
+
+The other reward part is dynamic. To deliver an XCM message from one Bridge Hub to another, we need to
+submit two transactions on different chains. Every transaction has its cost, which is:
+
+- dynamic, because e.g. the message size can change and/or the fee factor of the target chain may change;
+
+- quite large, because those transactions are quite heavy (mostly in terms of size, not weight).
+
+We are compensating the cost of **valid**, **minimal** and **useful** bridge-related transactions to
+the relayer that has submitted such a transaction. Valid here means that the transaction doesn't fail. Minimal
+means that all data within the transaction call is actually required for the transaction to succeed. Useful
+means that all supplied data in the transaction is new and yet unknown to the target chain.
+
+We have implemented a relayer that is able to craft such transactions. The rest of the document contains
+detailed information on how to deploy this software on your own node.
+
+## Relayers Concurrency
+
+As said above, we do not compensate the cost of transactions that are not **useful**.
+For example, if message `100` has already been delivered from Kusama Bridge Hub to Polkadot Bridge Hub, then another
+transaction that delivers the same message `100` won't be **useful**. Hence, no compensation for the relayer that
+has submitted that second transaction.
+
+But what if there are several relayers running? They notice the same queued message `100` and
+simultaneously submit identical message delivery transactions. You may expect that there'll be one lucky
+relayer whose transaction wins the "race" and receives the compensation and reward, and
+several other relayers losing some funds on their unuseful transactions.
+
+But actually, we have a solution that invalidates transactions of "unlucky" relayers before they are
+included into the block. So at least you may be sure that you won't waste your funds on duplicate transactions.
+
+<details>
+<summary>Some details?</summary>
+
+All **unuseful** transactions are rejected by our
+[transaction extension](https://github.com/paritytech/polkadot-sdk/blob/master/bridges/bin/runtime-common/src/refund_relayer_extension.rs),
+which also handles transaction fee compensations. You may find more info on unuseful (aka obsolete) transactions
+by lurking in the code.
+
+We also have a WiP prototype of a relayers coordination protocol, where relayers will get some guarantee
+that their transactions will be prioritized over other relayers' transactions at their assigned slots.
+That is planned for a future version of the bridge and the progress is
+[tracked here](https://github.com/paritytech/parity-bridges-common/issues/2486).
+
+</details>
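+
+To make this concrete, the gist of the check for message delivery transactions is a simple nonce
+comparison. Here is a minimal sketch (illustrative names only - the real logic lives in the
+transaction extension linked above and operates on pallet storage):
+
+```rust
+/// Simplified sketch of the "usefulness" check for a delivery transaction.
+fn delivery_transaction_is_useful(
+    best_delivered_nonce: u64,
+    // nonces of the messages bundled into the transaction
+    bundled_nonces: std::ops::RangeInclusive<u64>,
+) -> bool {
+    // useful only if it delivers at least one message the target chain
+    // doesn't know about yet; minimality additionally requires that the
+    // bundle starts right after the best delivered nonce
+    *bundled_nonces.end() > best_delivered_nonce &&
+        *bundled_nonces.start() <= best_delivered_nonce + 1
+}
+```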
+
+## Prerequisites
+
+Let's focus on the bridge between Polkadot and Kusama Bridge Hubs. Let's also assume that we want to start
+a relayer that "serves" an initial lane [`0x00000001`](https://github.com/polkadot-fellows/runtimes/blob/9ce1bbbbcd7843b3c76ba4d43c036bc311959e9f/system-parachains/bridge-hubs/bridge-hub-kusama/src/bridge_to_polkadot_config.rs#L54).
+
+<details>
+<summary>Lane?</summary>
+
+Think of a lane as a queue of messages that need to be delivered to the other/bridged chain. The lane is
+bidirectional, meaning that there are four "endpoints". Two "outbound" endpoints (one at every chain) contain
+messages that need to be delivered to the bridged chain. Two "inbound" endpoints accept messages from the bridged
+chain and also remember the relayer who has delivered message(s), to reward it later.
+
+</details>
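+
+For intuition, the four endpoints can be pictured with toy types like these (illustrative only - these
+are not the actual pallet storage types):
+
+```rust
+type AccountId = [u8; 32];
+
+/// Messages queued for delivery to the bridged chain.
+struct OutboundEndpoint {
+    queued_messages: Vec<Vec<u8>>,
+}
+
+/// Messages accepted from the bridged chain.
+struct InboundEndpoint {
+    /// Highest message nonce received so far.
+    last_delivered_nonce: u64,
+    /// Which relayer delivered which nonce range, so it can be rewarded later.
+    unrewarded_relayers: Vec<(AccountId, std::ops::RangeInclusive<u64>)>,
+}
+
+/// Every chain hosts one outbound and one inbound endpoint of the lane.
+struct LaneEnd {
+    outbound: OutboundEndpoint,
+    inbound: InboundEndpoint,
+}
+```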
+
+The same steps may be performed for other lanes and bridges as well - you'll just need to change several parameters.
+
+So to start your relayer instance, you'll need to prepare:
+
+- an address of the ws/wss RPC endpoint of the Kusama relay chain;
+
+- an address of the ws/wss RPC endpoint of the Polkadot relay chain;
+
+- an address of the ws/wss RPC endpoint of the Kusama Bridge Hub chain;
+
+- an address of the ws/wss RPC endpoint of the Polkadot Bridge Hub chain;
+
+- an account on Kusama Bridge Hub;
+
+- an account on Polkadot Bridge Hub.
+
+For RPC endpoints, you could start your own nodes, or use some public community nodes. Nodes are not required
+to be archive nodes or to provide access to insecure RPC calls.
+
+To create an account on the Bridge Hubs, you could use the XCM teleport functionality. E.g. if you have an account on
+the relay chain, you could use the `teleportAssets` call of `xcmPallet` and send asset
+`V3 { id: Concrete(0, Here), Fungible: <amount> }` to beneficiary `V3(0, X1(AccountId32(<your account>)))`
+on destination `V3(0, X1(Parachain(1002)))`. To estimate the amounts you need, please refer to the [Costs](#costs)
+section of the document.
+
+## Registering your Relayer Account (Optional, But Please Read)
+
+Bridge transactions are quite heavy and expensive. We want to minimize block space that can be occupied by
+invalid bridge transactions and prioritize valid transactions over invalid ones. That is achieved by **optional**
+relayer registration. Transactions signed by relayers with an active registration gain a huge priority boost.
+In exchange, such relayers may be slashed if they submit an **invalid** or **non-minimal** transaction.
+
+Transactions signed by relayers **without** an active registration, on the other hand, receive no priority
+boost. It means that if there is an active registered relayer, most likely all transactions from unregistered
+relayers will be counted as **unuseful**, not included into the block, and the unregistered relayer won't get
+any reward for its operations.
+
+Before registering, you should know several things about your funds:
+
+- to register, you need to hold a significant amount of funds on your relayer account. As of now, it is
+  [100 KSM](https://github.com/polkadot-fellows/runtimes/blob/9ce1bbbbcd7843b3c76ba4d43c036bc311959e9f/system-parachains/bridge-hubs/bridge-hub-kusama/src/bridge_to_polkadot_config.rs#L71C14-L71C43)
+  for registration on Kusama Bridge Hub and
+  [500 DOT](https://github.com/polkadot-fellows/runtimes/blob/9ce1bbbbcd7843b3c76ba4d43c036bc311959e9f/system-parachains/bridge-hubs/bridge-hub-polkadot/src/bridge_to_kusama_config.rs#L71C14-L71C43)
+  for registration on Polkadot Bridge Hub;
+
+- when you are registered, those funds are reserved on the relayer account and you can't transfer them.
+
+The registration itself has three states: active, inactive or expired. Initially it is active, meaning that all
+your transactions that are **validated** on top of a block where it is active get the priority boost. Registration
+becomes expired when the block with the number you have specified during registration is "mined". It is the
+`validTill` parameter of the `register` call (see below). After that `validTill` block, you may unregister and get
+your reserved funds back. There's also an intermediate point between those blocks - it is `validTill - LEASE`,
+where `LEASE` is the chain constant, controlled by the governance. Initially it is set to `300` blocks.
+All your transactions, **validated** between the `validTill - LEASE` and `validTill` blocks do not get the
+priority boost.
+Also, it is forbidden to specify a `validTill` such that `validTill - currentBlock` is less
+than the `LEASE`.
+
+<details>
+<summary>Example?</summary>
+
+| Bridge Hub Block | Registration State | Comment                                                |
+| ---------------- | ------------------ | ------------------------------------------------------ |
+| 100              | Active             | You have submitted a tx with the `register(1000)` call |
+| 101              | Active             | Your message delivery transactions are boosted         |
+| 102              | Active             | Your message delivery transactions are boosted         |
+| ...              | Active             | Your message delivery transactions are boosted         |
+| 700              | Inactive           | Your message delivery transactions are not boosted     |
+| 701              | Inactive           | Your message delivery transactions are not boosted     |
+| ...              | Inactive           | Your message delivery transactions are not boosted     |
+| 1000             | Expired            | You may submit a tx with the `deregister` call         |
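+
+A minimal sketch of this timeline in code (illustrative only, assuming `LEASE = 300` as in the example;
+this is not the actual runtime implementation):
+
+```rust
+#[derive(Debug, PartialEq)]
+enum RegistrationState {
+    Active,   // transactions get the priority boost
+    Inactive, // within the last `LEASE` blocks before `validTill` - no boost
+    Expired,  // `validTill` has passed - you may `deregister`
+}
+
+fn registration_state(current_block: u32, valid_till: u32, lease: u32) -> RegistrationState {
+    if current_block >= valid_till {
+        RegistrationState::Expired
+    } else if current_block >= valid_till.saturating_sub(lease) {
+        RegistrationState::Inactive
+    } else {
+        RegistrationState::Active
+    }
+}
+
+#[test]
+fn matches_the_table_above() {
+    // register(1000) was submitted at block 100, LEASE = 300
+    assert_eq!(registration_state(100, 1000, 300), RegistrationState::Active);
+    assert_eq!(registration_state(699, 1000, 300), RegistrationState::Active);
+    assert_eq!(registration_state(700, 1000, 300), RegistrationState::Inactive);
+    assert_eq!(registration_state(1000, 1000, 300), RegistrationState::Expired);
+}
+```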
+
+</details>
+
+So once you have enough funds on your account and have selected the `validTill` parameter value, you
+could use the Polkadot JS Apps to submit an extrinsic. If you want a priority boost for your transactions
+on the Kusama Bridge Hub, open the
+[Polkadot JS Apps](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fkusama-bridge-hub-rpc.polkadot.io#/extrinsics)
+and submit the `register` extrinsic from the `bridgeRelayers` pallet:
+
+![Register Extrinsic](./bridge-relayers-register.png)
+
+To deregister, submit the simple `deregister` extrinsic when the registration is expired:
+
+![Deregister Extrinsic](./bridge-relayers-deregister.png)
+
+At any time, you can prolong your registration by calling `register` with a larger `validTill`.
+
+## Costs
+
+Your relayer account (on both Bridge Hubs) must hold enough funds to be able to pay the costs of bridge
+transactions. If your relayer behaves correctly, those costs will be compensated and you will be
+able to claim them later.
+
+**IMPORTANT**: you may add a tip to your bridge transactions to boost their priority. But our
+compensation mechanism never refunds the transaction tip, so all tip tokens will be lost.
+
+<details>
+<summary>Types of bridge transactions</summary>
+
+There are two types of bridge transactions:
+
+- a message delivery transaction brings queued message(s) from one Bridge Hub to another. We record
+  the fact that this specific (your) relayer has delivered those messages;
+
+- a message confirmation transaction confirms that some messages have been delivered and also brings
+  back information on how many messages (your) relayer has delivered. We use this information later
+  to register delivery rewards on the source chain.
+
+Several messages/confirmations may be included in a single bridge transaction. Apart from this
+data, a bridge transaction may include finality and storage proofs, required to prove the authenticity of
+this data.
+
+</details>
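+
+For intuition only, the two kinds can be sketched as a Rust enum (illustrative field names; the real
+calls are the messages pallet's `receive_messages_proof` and `receive_messages_delivery_proof`):
+
+```rust
+/// Illustrative sketch of the two bridge transaction kinds.
+enum BridgeTransaction {
+    /// Brings queued message(s) plus the finality/storage proofs needed to
+    /// verify them on the target Bridge Hub.
+    MessageDelivery { nonces: std::ops::RangeInclusive<u64>, proof: Vec<u8> },
+    /// Brings the delivery confirmation (and relayer info) back to the
+    /// source Bridge Hub, so delivery rewards can be registered there.
+    MessageConfirmation { confirmed_up_to: u64, proof: Vec<u8> },
+}
+```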
+
+To deliver and get a reward for a single message, the relayer needs to submit two transactions: one
+at the source Bridge Hub and one at the target Bridge Hub. Below are the costs for Polkadot <> Kusama
+messages (as of today):
+
+- to deliver a single Polkadot -> Kusama message, you would need to pay around `0.06 KSM` at Kusama
+  Bridge Hub and around `1.62 DOT` at Polkadot Bridge Hub;
+
+- to deliver a single Kusama -> Polkadot message, you would need to pay around `1.70 DOT` at Polkadot
+  Bridge Hub and around `0.05 KSM` at Kusama Bridge Hub.
+
+Those values are not constants - they depend on call weights (which may change from release to release),
+on transaction sizes (which depend on message size and chain state) and on the congestion factor. In any
+case - it is your duty to make sure that the relayer has enough funds to pay transaction fees.
+
+## Claiming your Compensations and Rewards
+
+Hopefully you have successfully delivered some messages and can now claim your compensation and reward.
+This requires submitting several transactions. But first, let's check that you actually have something to
+claim. For that, let's check the state of the pallet that tracks all rewards.
+
+To check your rewards at the Kusama Bridge Hub, go to the
+[Polkadot JS Apps](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fkusama-bridge-hub-rpc.polkadot.io#/chainstate)
+targeting Kusama Bridge Hub, select the `bridgeRelayers` pallet, choose the `relayerRewards` map and
+your relayer account. Then:
+
+- set the `laneId` to `0x00000001`;
+
+- set the `bridgedChainId` to `bhpd`;
+
+- check both variants of the `owner` field: `ThisChain` is used to pay for message delivery transactions
+  and `BridgedChain` is used to pay for message confirmation transactions.
+
+If the check shows that you have some rewards, you can craft the claim transaction with similar parameters.
+For that, go to the `Extrinsics` tab of the
+[Polkadot JS Apps](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fkusama-bridge-hub-rpc.polkadot.io#/extrinsics)
+and submit the following transaction (make sure to change `owner` before):
+
+![Claim Rewards Extrinsic](./bridge-relayers-claim-rewards.png)
+
+To claim rewards on Polkadot Bridge Hub you can follow the same process. The only difference is that you
+need to set the value of the `bridgedChainId` to `bhks`.
+
+## Starting your Relayer
+
+### Starting your Rococo <> Westend Relayer
+
+You may find the relayer image reference in the
+[Releases](https://github.com/paritytech/parity-bridges-common/releases)
+of this repository. Make sure to check the supported (bundled) versions
+of the release there. For the Rococo <> Westend bridge, normally you may use the
+latest published release. The release notes always contain the docker
+image reference and the source files required to build the relayer manually.
+
+Once you have the docker image, update the variables and run the following script:
+```sh
+export DOCKER_IMAGE=
+
+export ROCOCO_HOST=
+export ROCOCO_PORT=
+# or set it to '--rococo-secure' if wss is used above
+export ROCOCO_IS_SECURE=
+export BRIDGE_HUB_ROCOCO_HOST=
+export BRIDGE_HUB_ROCOCO_PORT=
+# or set it to '--bridge-hub-rococo-secure' if wss is used above
+export BRIDGE_HUB_ROCOCO_IS_SECURE=
+export BRIDGE_HUB_ROCOCO_KEY_FILE=
+
+export WESTEND_HOST=
+export WESTEND_PORT=
+# or set it to '--westend-secure' if wss is used above
+export WESTEND_IS_SECURE=
+export BRIDGE_HUB_WESTEND_HOST=
+export BRIDGE_HUB_WESTEND_PORT=
+# or set it to '--bridge-hub-westend-secure' if wss is used above
+export BRIDGE_HUB_WESTEND_IS_SECURE=
+export BRIDGE_HUB_WESTEND_KEY_FILE=
+
+# you can get extended relay logs (e.g. for debugging issues) by passing the `-e RUST_LOG=bridge=trace`
+# argument to the `docker` binary
+docker run \
+    -v $BRIDGE_HUB_ROCOCO_KEY_FILE:/bhr.key \
+    -v $BRIDGE_HUB_WESTEND_KEY_FILE:/bhw.key \
+    $DOCKER_IMAGE \
+    relay-headers-and-messages bridge-hub-rococo-bridge-hub-westend \
+    --rococo-host $ROCOCO_HOST \
+    --rococo-port $ROCOCO_PORT \
+    $ROCOCO_IS_SECURE \
+    --rococo-version-mode Auto \
+    --bridge-hub-rococo-host $BRIDGE_HUB_ROCOCO_HOST \
+    --bridge-hub-rococo-port $BRIDGE_HUB_ROCOCO_PORT \
+    $BRIDGE_HUB_ROCOCO_IS_SECURE \
+    --bridge-hub-rococo-version-mode Auto \
+    --bridge-hub-rococo-signer-file /bhr.key \
+    --bridge-hub-rococo-transactions-mortality 16 \
+    --westend-host $WESTEND_HOST \
+    --westend-port $WESTEND_PORT \
+    $WESTEND_IS_SECURE \
+    --westend-version-mode Auto \
+    --bridge-hub-westend-host $BRIDGE_HUB_WESTEND_HOST \
+    --bridge-hub-westend-port $BRIDGE_HUB_WESTEND_PORT \
+    $BRIDGE_HUB_WESTEND_IS_SECURE \
+    --bridge-hub-westend-version-mode Auto \
+    --bridge-hub-westend-signer-file /bhw.key \
+    --bridge-hub-westend-transactions-mortality 16 \
+    --lane 00000002
+```
+
+### Starting your Polkadot <> Kusama Relayer
+
+*Work in progress, coming soon*
+
+### Watching your relayer state
+
+Our relayer provides some Prometheus metrics that you may convert into some fancy Grafana dashboards
+and alerts. By default, metrics are exposed at port `9616`. To expose the endpoint to the localhost, change
+the docker command by adding the following two lines:
+
+```sh
+docker run \
+    ..
+    -p 127.0.0.1:9616:9616 \ # tell Docker to bind container port 9616 to host port 9616
+                             # and listen for connections on the host's localhost interface
+    ..
+    $DOCKER_IMAGE \
+    relay-headers-and-messages bridge-hub-rococo-bridge-hub-westend \
+    --prometheus-host 0.0.0.0 \ # tell the `substrate-relay` binary to accept Prometheus endpoint
+                                # connections from everywhere
+    ..
+```
+
+You can find more info on configuring Prometheus and Grafana in the
+[Monitor your node](https://wiki.polkadot.network/docs/maintain-guides-how-to-monitor-your-node)
+guide from the Polkadot wiki.
+
+We have our own set of Grafana dashboards and alerts. You may use them for inspiration.
+Please find them in these folders:
+
+- for Rococo <> Westend bridge: [rococo-westend](https://github.com/paritytech/parity-bridges-common/tree/master/deployments/bridges/rococo-westend).
+ +- for Polkadot <> Kusama bridge: *work in progress, coming soon* diff --git a/bridges/modules/grandpa/Cargo.toml b/bridges/modules/grandpa/Cargo.toml index 02ef57a55974..4e78ca6829ee 100644 --- a/bridges/modules/grandpa/Cargo.toml +++ b/bridges/modules/grandpa/Cargo.toml @@ -5,6 +5,7 @@ description = "Module implementing GRANDPA on-chain light client used for bridgi authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -12,10 +13,10 @@ workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/modules/grandpa/README.md b/bridges/modules/grandpa/README.md index 992bd2cc4722..4a3099b8afc6 100644 --- a/bridges/modules/grandpa/README.md +++ b/bridges/modules/grandpa/README.md @@ -10,7 +10,7 @@ It is used by the parachains light client (bridge parachains pallet) and by mess ## A Brief Introduction into GRANDPA Finality You can find detailed information on GRANDPA, by exploring its [repository](https://github.com/paritytech/finality-grandpa). -Here is the minimal reqiuired GRANDPA information to understand how pallet works. +Here is the minimal required GRANDPA information to understand how pallet works. Any Substrate chain may use different block authorship algorithms (like BABE or Aura) to determine block producers and generate blocks. This has nothing common with finality, though - the task of block authorship is to coordinate diff --git a/bridges/modules/grandpa/src/call_ext.rs b/bridges/modules/grandpa/src/call_ext.rs index bc1bbfdc1479..6d7770a89445 100644 --- a/bridges/modules/grandpa/src/call_ext.rs +++ b/bridges/modules/grandpa/src/call_ext.rs @@ -205,7 +205,7 @@ pub(crate) fn submit_finality_proof_info_from_args, I: 'static>( // as an extra weight. 
let votes_ancestries_len = justification.votes_ancestries.len().saturated_into(); let extra_weight = - if votes_ancestries_len > T::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY { + if votes_ancestries_len > T::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY { T::WeightInfo::submit_finality_proof(precommits_len, votes_ancestries_len) } else { Weight::zero() @@ -396,11 +396,11 @@ mod tests { let finality_target = test_header(1); let mut justification_params = JustificationGeneratorParams { header: finality_target.clone(), - ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY, + ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY, ..Default::default() }; - // when there are `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY` headers => no refund + // when there are `REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY` headers => no refund let justification = make_justification_for_header(justification_params.clone()); let call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof_ex { finality_target: Box::new(finality_target.clone()), @@ -409,7 +409,7 @@ mod tests { }); assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, Weight::zero()); - // when there are `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1` headers => full refund + // when there are `REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY + 1` headers => full refund justification_params.ancestors += 1; let justification = make_justification_for_header(justification_params); let call_weight = ::WeightInfo::submit_finality_proof( diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index 2f787ebd7c31..7afdb4094d93 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -939,7 +939,7 @@ mod tests { } #[test] - fn succesfully_imports_header_with_valid_finality() { + fn successfully_imports_header_with_valid_finality() { run_test(|| { initialize_substrate_bridge(); @@ -1196,7 +1196,7 @@ mod tests { header.digest = change_log(0); let justification = make_justification_for_header(JustificationGeneratorParams { header: header.clone(), - ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1, + ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY + 1, ..Default::default() }); diff --git a/bridges/modules/grandpa/src/mock.rs b/bridges/modules/grandpa/src/mock.rs index 4318d663a2e1..e689e520c92f 100644 --- a/bridges/modules/grandpa/src/mock.rs +++ b/bridges/modules/grandpa/src/mock.rs @@ -87,7 +87,7 @@ impl Chain for TestBridgedChain { impl ChainWithGrandpa for TestBridgedChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = MAX_BRIDGED_AUTHORITIES; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8; const MAX_MANDATORY_HEADER_SIZE: u32 = 256; const AVERAGE_HEADER_SIZE: u32 = 64; } diff --git a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index 038e3aa5da61..cf40df6a1c89 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -5,15 +5,16 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", 
default-features = false } log = { workspace = true } num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge dependencies diff --git a/bridges/modules/messages/src/inbound_lane.rs b/bridges/modules/messages/src/inbound_lane.rs index ed3c6f11e7c1..f3b62222b59c 100644 --- a/bridges/modules/messages/src/inbound_lane.rs +++ b/bridges/modules/messages/src/inbound_lane.rs @@ -21,7 +21,7 @@ use crate::Config; use bp_messages::{ target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, - ReceivalResult, UnrewardedRelayer, + ReceptionResult, UnrewardedRelayer, }; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use core::cmp::PartialEq; @@ -170,21 +170,21 @@ impl InboundLane { relayer_at_bridged_chain: &S::Relayer, nonce: MessageNonce, message_data: DispatchMessageData, - ) -> ReceivalResult { + ) -> ReceptionResult { let mut data = self.storage.get_or_init_data(); if Some(nonce) != data.last_delivered_nonce().checked_add(1) { - return ReceivalResult::InvalidNonce + return ReceptionResult::InvalidNonce } // if there are more unrewarded relayer entries than we may accept, reject this message if data.relayers.len() as MessageNonce >= self.storage.max_unrewarded_relayer_entries() { - return ReceivalResult::TooManyUnrewardedRelayers + return ReceptionResult::TooManyUnrewardedRelayers } // if there are more unconfirmed messages than we may accept, reject this message let unconfirmed_messages_count = nonce.saturating_sub(data.last_confirmed_nonce); if unconfirmed_messages_count > self.storage.max_unconfirmed_messages() { - return ReceivalResult::TooManyUnconfirmedMessages + return ReceptionResult::TooManyUnconfirmedMessages } // then, dispatch message @@ -207,7 +207,7 @@ impl InboundLane { }; self.storage.set_data(data); - ReceivalResult::Dispatched(dispatch_result) + ReceptionResult::Dispatched(dispatch_result) } } @@ -235,7 +235,7 @@ mod tests { nonce, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); } @@ -362,7 +362,7 @@ mod tests { 10, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::InvalidNonce + ReceptionResult::InvalidNonce ); assert_eq!(lane.storage.get_or_init_data().last_delivered_nonce(), 0); }); @@ -381,7 +381,7 @@ mod tests { current_nonce, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); } // Fails to dispatch new message from different than latest relayer. @@ -391,7 +391,7 @@ mod tests { max_nonce + 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::TooManyUnrewardedRelayers, + ReceptionResult::TooManyUnrewardedRelayers, ); // Fails to dispatch new messages from latest relayer. Prevents griefing attacks. assert_eq!( @@ -400,7 +400,7 @@ mod tests { max_nonce + 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::TooManyUnrewardedRelayers, + ReceptionResult::TooManyUnrewardedRelayers, ); }); } @@ -417,7 +417,7 @@ mod tests { current_nonce, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); } // Fails to dispatch new message from different than latest relayer. 
@@ -427,7 +427,7 @@ mod tests { max_nonce + 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::TooManyUnconfirmedMessages, + ReceptionResult::TooManyUnconfirmedMessages, ); // Fails to dispatch new messages from latest relayer. assert_eq!( @@ -436,7 +436,7 @@ mod tests { max_nonce + 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::TooManyUnconfirmedMessages, + ReceptionResult::TooManyUnconfirmedMessages, ); }); } @@ -451,7 +451,7 @@ mod tests { 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); assert_eq!( lane.receive_message::( @@ -459,7 +459,7 @@ mod tests { 2, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); assert_eq!( lane.receive_message::( @@ -467,7 +467,7 @@ mod tests { 3, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); assert_eq!( lane.storage.get_or_init_data().relayers, @@ -490,7 +490,7 @@ mod tests { 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); assert_eq!( lane.receive_message::( @@ -498,7 +498,7 @@ mod tests { 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::InvalidNonce, + ReceptionResult::InvalidNonce, ); }); } @@ -524,7 +524,7 @@ mod tests { 1, inbound_message_data(payload) ), - ReceivalResult::Dispatched(dispatch_result(1)) + ReceptionResult::Dispatched(dispatch_result(1)) ); }); } diff --git a/bridges/modules/messages/src/lib.rs b/bridges/modules/messages/src/lib.rs index e5fd00a4a5a6..385d7a852145 100644 --- a/bridges/modules/messages/src/lib.rs +++ b/bridges/modules/messages/src/lib.rs @@ -49,7 +49,7 @@ pub use weights_ext::{ use crate::{ inbound_lane::{InboundLane, InboundLaneStorage}, - outbound_lane::{OutboundLane, OutboundLaneStorage, ReceivalConfirmationError}, + outbound_lane::{OutboundLane, OutboundLaneStorage, ReceptionConfirmationError}, }; use bp_messages::{ @@ -92,7 +92,7 @@ pub const LOG_TARGET: &str = "runtime::bridge-messages"; #[frame_support::pallet] pub mod pallet { use super::*; - use bp_messages::{ReceivalResult, ReceivedMessages}; + use bp_messages::{ReceivedMessages, ReceptionResult}; use bp_runtime::RangeInclusiveExt; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -378,13 +378,13 @@ pub mod pallet { // delivery transaction cost anyway. And base cost covers everything except // dispatch, so we have a balance here. 
let unspent_weight = match &receival_result { - ReceivalResult::Dispatched(dispatch_result) => { + ReceptionResult::Dispatched(dispatch_result) => { valid_messages += 1; dispatch_result.unspent_weight }, - ReceivalResult::InvalidNonce | - ReceivalResult::TooManyUnrewardedRelayers | - ReceivalResult::TooManyUnconfirmedMessages => message_dispatch_weight, + ReceptionResult::InvalidNonce | + ReceptionResult::TooManyUnrewardedRelayers | + ReceptionResult::TooManyUnconfirmedMessages => message_dispatch_weight, }; lane_messages_received_status.push(message.key.nonce, receival_result); @@ -457,7 +457,7 @@ pub mod pallet { last_delivered_nonce, &lane_data.relayers, ) - .map_err(Error::::ReceivalConfirmation)?; + .map_err(Error::::ReceptionConfirmation)?; if let Some(confirmed_messages) = confirmed_messages { // emit 'delivered' event @@ -565,7 +565,7 @@ pub mod pallet { /// The message someone is trying to work with (i.e. increase fee) is not yet sent. MessageIsNotYetSent, /// Error confirming messages receival. - ReceivalConfirmation(ReceivalConfirmationError), + ReceptionConfirmation(ReceptionConfirmationError), /// Error generated by the `OwnedBridgeModule` trait. BridgeModule(bp_runtime::OwnedBridgeModuleError), } @@ -925,7 +925,7 @@ mod tests { PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_LANE_ID_2, TEST_LANE_ID_3, TEST_RELAYER_A, TEST_RELAYER_B, }, - outbound_lane::ReceivalConfirmationError, + outbound_lane::ReceptionConfirmationError, }; use bp_messages::{ source_chain::MessagesBridge, BridgeMessagesCall, UnrewardedRelayer, @@ -952,11 +952,11 @@ mod tests { let outbound_lane = outbound_lane::(lane_id); let message_nonce = outbound_lane.data().latest_generated_nonce + 1; - let prev_enqueud_messages = outbound_lane.data().queued_messages().saturating_len(); + let prev_enqueued_messages = outbound_lane.data().queued_messages().saturating_len(); let valid_message = Pallet::::validate_message(lane_id, ®ULAR_PAYLOAD) .expect("validate_message has failed"); let artifacts = Pallet::::send_message(valid_message); - assert_eq!(artifacts.enqueued_messages, prev_enqueud_messages + 1); + assert_eq!(artifacts.enqueued_messages, prev_enqueued_messages + 1); // check event with assigned nonce assert_eq!( @@ -1543,7 +1543,7 @@ mod tests { } #[test] - fn actual_dispatch_weight_does_not_overlow() { + fn actual_dispatch_weight_does_not_overflow() { run_test(|| { let message1 = message(1, message_payload(0, u64::MAX / 2)); let message2 = message(2, message_payload(0, u64::MAX / 2)); @@ -1777,7 +1777,7 @@ mod tests { // returns `last_confirmed_nonce`; // 3) it means that we're going to confirm delivery of messages 1..=1; // 4) so the number of declared messages (see `UnrewardedRelayersState`) is `0` and - // numer of actually confirmed messages is `1`. + // number of actually confirmed messages is `1`. 
assert_noop!( Pallet::::receive_messages_delivery_proof( RuntimeOrigin::signed(1), @@ -1787,8 +1787,8 @@ mod tests { ))), UnrewardedRelayersState { last_delivered_nonce: 1, ..Default::default() }, ), - Error::::ReceivalConfirmation( - ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected + Error::::ReceptionConfirmation( + ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected ), ); }); diff --git a/bridges/modules/messages/src/outbound_lane.rs b/bridges/modules/messages/src/outbound_lane.rs index 9c2bc2e8971e..a082c4bc0652 100644 --- a/bridges/modules/messages/src/outbound_lane.rs +++ b/bridges/modules/messages/src/outbound_lane.rs @@ -53,7 +53,7 @@ pub type StoredMessagePayload = BoundedVec>::MaximalOu /// Result of messages receival confirmation. #[derive(Encode, Decode, RuntimeDebug, PartialEq, Eq, PalletError, TypeInfo)] -pub enum ReceivalConfirmationError { +pub enum ReceptionConfirmationError { /// Bridged chain is trying to confirm more messages than we have generated. May be a result /// of invalid bridged chain storage. FailedToConfirmFutureMessages, @@ -103,7 +103,7 @@ impl OutboundLane { max_allowed_messages: MessageNonce, latest_delivered_nonce: MessageNonce, relayers: &VecDeque>, - ) -> Result, ReceivalConfirmationError> { + ) -> Result, ReceptionConfirmationError> { let mut data = self.storage.data(); let confirmed_messages = DeliveredMessages { begin: data.latest_received_nonce.saturating_add(1), @@ -113,7 +113,7 @@ impl OutboundLane { return Ok(None) } if confirmed_messages.end > data.latest_generated_nonce { - return Err(ReceivalConfirmationError::FailedToConfirmFutureMessages) + return Err(ReceptionConfirmationError::FailedToConfirmFutureMessages) } if confirmed_messages.total_messages() > max_allowed_messages { // that the relayer has declared correct number of messages that the proof contains (it @@ -127,7 +127,7 @@ impl OutboundLane { confirmed_messages.total_messages(), max_allowed_messages, ); - return Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected) + return Err(ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected) } ensure_unrewarded_relayers_are_correct(confirmed_messages.end, relayers)?; @@ -176,24 +176,24 @@ impl OutboundLane { fn ensure_unrewarded_relayers_are_correct( latest_received_nonce: MessageNonce, relayers: &VecDeque>, -) -> Result<(), ReceivalConfirmationError> { +) -> Result<(), ReceptionConfirmationError> { let mut expected_entry_begin = relayers.front().map(|entry| entry.messages.begin); for entry in relayers { // unrewarded relayer entry must have at least 1 unconfirmed message // (guaranteed by the `InboundLane::receive_message()`) if entry.messages.end < entry.messages.begin { - return Err(ReceivalConfirmationError::EmptyUnrewardedRelayerEntry) + return Err(ReceptionConfirmationError::EmptyUnrewardedRelayerEntry) } // every entry must confirm range of messages that follows previous entry range // (guaranteed by the `InboundLane::receive_message()`) if expected_entry_begin != Some(entry.messages.begin) { - return Err(ReceivalConfirmationError::NonConsecutiveUnrewardedRelayerEntries) + return Err(ReceptionConfirmationError::NonConsecutiveUnrewardedRelayerEntries) } expected_entry_begin = entry.messages.end.checked_add(1); // entry can't confirm messages larger than `inbound_lane_data.latest_received_nonce()` // (guaranteed by the `InboundLane::receive_message()`) if entry.messages.end > latest_received_nonce { - return Err(ReceivalConfirmationError::FailedToConfirmFutureMessages) + 
return Err(ReceptionConfirmationError::FailedToConfirmFutureMessages) } } @@ -228,7 +228,7 @@ mod tests { fn assert_3_messages_confirmation_fails( latest_received_nonce: MessageNonce, relayers: &VecDeque>, - ) -> Result, ReceivalConfirmationError> { + ) -> Result, ReceptionConfirmationError> { run_test(|| { let mut lane = outbound_lane::(TEST_LANE_ID); lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); @@ -299,7 +299,7 @@ mod tests { fn confirm_delivery_rejects_nonce_larger_than_last_generated() { assert_eq!( assert_3_messages_confirmation_fails(10, &unrewarded_relayers(1..=10),), - Err(ReceivalConfirmationError::FailedToConfirmFutureMessages), + Err(ReceptionConfirmationError::FailedToConfirmFutureMessages), ); } @@ -314,7 +314,7 @@ mod tests { .chain(unrewarded_relayers(3..=3).into_iter()) .collect(), ), - Err(ReceivalConfirmationError::FailedToConfirmFutureMessages), + Err(ReceptionConfirmationError::FailedToConfirmFutureMessages), ); } @@ -330,7 +330,7 @@ mod tests { .chain(unrewarded_relayers(2..=3).into_iter()) .collect(), ), - Err(ReceivalConfirmationError::EmptyUnrewardedRelayerEntry), + Err(ReceptionConfirmationError::EmptyUnrewardedRelayerEntry), ); } @@ -345,7 +345,7 @@ mod tests { .chain(unrewarded_relayers(2..=2).into_iter()) .collect(), ), - Err(ReceivalConfirmationError::NonConsecutiveUnrewardedRelayerEntries), + Err(ReceptionConfirmationError::NonConsecutiveUnrewardedRelayerEntries), ); } @@ -409,11 +409,11 @@ mod tests { lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); assert_eq!( lane.confirm_delivery(0, 3, &unrewarded_relayers(1..=3)), - Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected), + Err(ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected), ); assert_eq!( lane.confirm_delivery(2, 3, &unrewarded_relayers(1..=3)), - Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected), + Err(ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected), ); assert_eq!( lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), diff --git a/bridges/modules/parachains/Cargo.toml b/bridges/modules/parachains/Cargo.toml index 3f4569897f02..bd1592fe3813 100644 --- a/bridges/modules/parachains/Cargo.toml +++ b/bridges/modules/parachains/Cargo.toml @@ -5,14 +5,15 @@ description = "Module that allows bridged relay chains to exchange information o authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/modules/parachains/src/mock.rs b/bridges/modules/parachains/src/mock.rs index 3af3fd3e7639..d9cbabf850ec 100644 --- a/bridges/modules/parachains/src/mock.rs +++ b/bridges/modules/parachains/src/mock.rs @@ -261,7 +261,7 @@ impl Chain for TestBridgedChain { impl ChainWithGrandpa for TestBridgedChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = 16; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8; const MAX_MANDATORY_HEADER_SIZE: u32 = 256; const AVERAGE_HEADER_SIZE: 
u32 = 64; } @@ -294,7 +294,7 @@ impl Chain for OtherBridgedChain { impl ChainWithGrandpa for OtherBridgedChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = 16; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8; const MAX_MANDATORY_HEADER_SIZE: u32 = 256; const AVERAGE_HEADER_SIZE: u32 = 64; } diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index 8a086d6b7f23..acec732bcd23 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -5,14 +5,15 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge dependencies diff --git a/bridges/modules/xcm-bridge-hub-router/Cargo.toml b/bridges/modules/xcm-bridge-hub-router/Cargo.toml index 4a42b9616de0..396d4bcc4c16 100644 --- a/bridges/modules/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub-router/Cargo.toml @@ -5,14 +5,15 @@ version = "0.5.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] } # Bridge dependencies diff --git a/bridges/modules/xcm-bridge-hub-router/src/lib.rs b/bridges/modules/xcm-bridge-hub-router/src/lib.rs index 07341cb1eab7..0c2a0603c97a 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/lib.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/lib.rs @@ -429,7 +429,7 @@ mod tests { run_test(|| { Bridge::<TestRuntime, ()>::put(uncongested_bridge(FixedU128::from_rational(125, 100))); - // it shold eventually decreased to one + // it should eventually decrease to one while XcmBridgeHubRouter::bridge().delivery_fee_factor > MINIMAL_DELIVERY_FEE_FACTOR { XcmBridgeHubRouter::on_initialize(One::one()); } diff --git a/bridges/modules/xcm-bridge-hub/Cargo.toml b/bridges/modules/xcm-bridge-hub/Cargo.toml index 3167b03ea2d0..3591e8341b7e 100644 --- a/bridges/modules/xcm-bridge-hub/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub/Cargo.toml @@ -1,18 +1,19 @@ [package] name = "pallet-xcm-bridge-hub" -description = "Module that adds dynamic bridges/lanes support to XCM infrastucture at the bridge hub." +description = "Module that adds dynamic bridges/lanes support to XCM infrastructure at the bridge hub."
version = "0.2.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge Dependencies bp-messages = { path = "../../primitives/messages", default-features = false } diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index fb8426d57f11..710bdbb449ae 100644 --- a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -5,14 +5,15 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies diff --git a/bridges/primitives/header-chain/src/justification/mod.rs b/bridges/primitives/header-chain/src/justification/mod.rs index 0a48b721e7ea..8944d3c3117d 100644 --- a/bridges/primitives/header-chain/src/justification/mod.rs +++ b/bridges/primitives/header-chain/src/justification/mod.rs @@ -84,7 +84,7 @@ impl GrandpaJustification { .saturating_add(HashOf::::max_encoded_len().saturated_into()); let max_expected_votes_ancestries_size = - C::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY.saturating_mul(C::AVERAGE_HEADER_SIZE); + C::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY.saturating_mul(C::AVERAGE_HEADER_SIZE); // justification is round number (u64=8b), a signed GRANDPA commit and the // `votes_ancestries` vector diff --git a/bridges/primitives/header-chain/src/justification/verification/mod.rs b/bridges/primitives/header-chain/src/justification/verification/mod.rs index da2b15d4a878..c61a351de36e 100644 --- a/bridges/primitives/header-chain/src/justification/verification/mod.rs +++ b/bridges/primitives/header-chain/src/justification/verification/mod.rs @@ -319,7 +319,7 @@ trait JustificationVerifier { } // check that the cumulative weight of validators that voted for the justification target - // (or one of its descendents) is larger than the required threshold. + // (or one of its descendants) is larger than the required threshold. if cumulative_weight < threshold { return Err(Error::TooLowCumulativeWeight) } diff --git a/bridges/primitives/header-chain/src/lib.rs b/bridges/primitives/header-chain/src/lib.rs index 5dabfbe1b05b..b5df345c176f 100644 --- a/bridges/primitives/header-chain/src/lib.rs +++ b/bridges/primitives/header-chain/src/lib.rs @@ -286,7 +286,7 @@ pub trait ChainWithGrandpa: Chain { /// ancestry and the pallet will accept such justification. 
The limit is only used to compute /// maximal refund amount and submitting justifications which exceed the limit, may be costly /// to submitter. - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32; /// Maximal size of the mandatory chain header. Mandatory header is the header that enacts new /// GRANDPA authorities set (so it has large digest inside). @@ -320,8 +320,8 @@ where const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ::WITH_CHAIN_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = ::MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - ::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + ::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = ::MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = ::AVERAGE_HEADER_SIZE; @@ -376,7 +376,7 @@ mod tests { impl ChainWithGrandpa for TestChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "Test"; const MAX_AUTHORITIES_COUNT: u32 = 128; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 2; const MAX_MANDATORY_HEADER_SIZE: u32 = 100_000; const AVERAGE_HEADER_SIZE: u32 = 1_024; } diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index 5a72e94f06db..9c3e97d521c5 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -5,13 +5,14 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies diff --git a/bridges/primitives/messages/src/lib.rs b/bridges/primitives/messages/src/lib.rs index 74553f747795..d33af5f0c3d3 100644 --- a/bridges/primitives/messages/src/lib.rs +++ b/bridges/primitives/messages/src/lib.rs @@ -294,27 +294,27 @@ pub struct ReceivedMessages { /// Id of the lane which is receiving messages. pub lane: LaneId, /// Result of messages which we tried to dispatch - pub receive_results: Vec<(MessageNonce, ReceivalResult)>, + pub receive_results: Vec<(MessageNonce, ReceptionResult)>, } impl ReceivedMessages { /// Creates new `ReceivedMessages` structure from given results. pub fn new( lane: LaneId, - receive_results: Vec<(MessageNonce, ReceivalResult)>, + receive_results: Vec<(MessageNonce, ReceptionResult)>, ) -> Self { ReceivedMessages { lane, receive_results } } /// Push `result` of the `message` delivery onto `receive_results` vector. - pub fn push(&mut self, message: MessageNonce, result: ReceivalResult) { + pub fn push(&mut self, message: MessageNonce, result: ReceptionResult) { self.receive_results.push((message, result)); } } /// Result of single message receival. 
#[derive(RuntimeDebug, Encode, Decode, PartialEq, Eq, Clone, TypeInfo)] -pub enum ReceivalResult { +pub enum ReceptionResult { /// Message has been received and dispatched. Note that we don't care whether dispatch has /// been successful or not - in both case message falls into this category. /// diff --git a/bridges/primitives/parachains/Cargo.toml b/bridges/primitives/parachains/Cargo.toml index c354806bd13d..ade28fde1cc9 100644 --- a/bridges/primitives/parachains/Cargo.toml +++ b/bridges/primitives/parachains/Cargo.toml @@ -5,14 +5,15 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge dependencies diff --git a/bridges/primitives/polkadot-core/Cargo.toml b/bridges/primitives/polkadot-core/Cargo.toml index 4b0b0619ad24..7c20369f4c55 100644 --- a/bridges/primitives/polkadot-core/Cargo.toml +++ b/bridges/primitives/polkadot-core/Cargo.toml @@ -5,14 +5,15 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } parity-util-mem = { version = "0.12.0", optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Bridge Dependencies diff --git a/bridges/primitives/polkadot-core/src/lib.rs b/bridges/primitives/polkadot-core/src/lib.rs index 0d82f9f10735..dce4b5639f3f 100644 --- a/bridges/primitives/polkadot-core/src/lib.rs +++ b/bridges/primitives/polkadot-core/src/lib.rs @@ -73,7 +73,7 @@ pub const MAX_AUTHORITIES_COUNT: u32 = 1_256; /// justifications with any additional headers in votes ancestry, so reasonable headers may /// be set to zero. But we assume that there may be small GRANDPA lags, so we're leaving some /// reserve here. -pub const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2; +pub const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 2; /// Average header size in `votes_ancestries` field of justification on Polkadot-like /// chains. 
diff --git a/bridges/primitives/relayers/Cargo.toml b/bridges/primitives/relayers/Cargo.toml index e0d834f488d3..820749c4b96b 100644 --- a/bridges/primitives/relayers/Cargo.toml +++ b/bridges/primitives/relayers/Cargo.toml @@ -5,13 +5,14 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } # Bridge Dependencies diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml index ab1355ed68ef..5152303cba5b 100644 --- a/bridges/primitives/runtime/Cargo.toml +++ b/bridges/primitives/runtime/Cargo.toml @@ -5,17 +5,18 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } hash-db = { version = "0.16.0", default-features = false } impl-trait-for-tuples = "0.2.2" log = { workspace = true } num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], workspace = true } # Substrate Dependencies diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs index 3b83a68f9e34..2677d2664322 100644 --- a/bridges/primitives/runtime/src/chain.rs +++ b/bridges/primitives/runtime/src/chain.rs @@ -106,7 +106,7 @@ pub trait Chain: Send + Sync + 'static { const ID: ChainId; /// A type that fulfills the abstract idea of what a Substrate block number is. - // Constraits come from the associated Number type of `sp_runtime::traits::Header` + // Constraints come from the associated Number type of `sp_runtime::traits::Header` // See here for more info: // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Number // @@ -127,7 +127,7 @@ pub trait Chain: Send + Sync + 'static { + MaxEncodedLen; /// A type that fulfills the abstract idea of what a Substrate hash is. - // Constraits come from the associated Hash type of `sp_runtime::traits::Header` + // Constraints come from the associated Hash type of `sp_runtime::traits::Header` // See here for more info: // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Hash type Hash: Parameter @@ -145,7 +145,7 @@ pub trait Chain: Send + Sync + 'static { /// A type that fulfills the abstract idea of what a Substrate hasher (a type /// that produces hashes) is. 
- // Constraits come from the associated Hashing type of `sp_runtime::traits::Header` + // Constraints come from the associated Hashing type of `sp_runtime::traits::Header` // See here for more info: // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Hashing type Hasher: HashT; diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index 2be7d69996ae..e2cb819d9016 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -60,7 +60,7 @@ mod chain; mod storage_proof; mod storage_types; -// Re-export macro to aviod include paste dependency everywhere +// Re-export macro to avoid including the `paste` dependency everywhere pub use sp_runtime::paste; /// Use this when something must be shared among all instances. @@ -465,7 +465,7 @@ macro_rules! generate_static_str_provider { }; } -/// Error message that is only dispayable in `std` environment. +/// Error message that is only displayable in `std` environment. #[derive(Encode, Decode, Clone, Eq, PartialEq, PalletError, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct StrippableError { diff --git a/bridges/primitives/test-utils/Cargo.toml b/bridges/primitives/test-utils/Cargo.toml index f27ca5ba3931..d41d8b6d3218 100644 --- a/bridges/primitives/test-utils/Cargo.toml +++ b/bridges/primitives/test-utils/Cargo.toml @@ -5,6 +5,7 @@ description = "Utilities for testing substrate-based runtime bridge code" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -14,7 +15,7 @@ bp-header-chain = { path = "../header-chain", default-features = false } bp-parachains = { path = "../parachains", default-features = false } bp-polkadot-core = { path = "../polkadot-core", default-features = false } bp-runtime = { path = "../runtime", default-features = false } -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } ed25519-dalek = { version = "2.1", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } diff --git a/bridges/primitives/test-utils/src/lib.rs b/bridges/primitives/test-utils/src/lib.rs index 9e692a96db69..5148d3f35536 100644 --- a/bridges/primitives/test-utils/src/lib.rs +++ b/bridges/primitives/test-utils/src/lib.rs @@ -91,7 +91,7 @@ pub fn make_default_justification(header: &H) -> GrandpaJustification /// Generate justifications in a way where we are able to tune the number of pre-commits /// and vote ancestries which are included in the justification. /// -/// This is useful for benchmarkings where we want to generate valid justifications with +/// This is useful for benchmarks where we want to generate valid justifications with /// a specific number of pre-commits (tuned with the number of "authorities") and/or a specific /// number of vote ancestries (tuned with the "votes" parameter).
/// diff --git a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml index 9297a8603c0a..94eece16d579 100644 --- a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml @@ -5,13 +5,14 @@ version = "0.6.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } # Substrate Dependencies sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } diff --git a/bridges/primitives/xcm-bridge-hub/Cargo.toml b/bridges/primitives/xcm-bridge-hub/Cargo.toml index 1c71a5cce7e8..d6ca3127dd82 100644 --- a/bridges/primitives/xcm-bridge-hub/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub/Cargo.toml @@ -5,6 +5,7 @@ version = "0.2.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true diff --git a/bridges/scripts/verify-pallets-build.sh b/bridges/scripts/verify-pallets-build.sh index b96bbf1833b6..9c57a2a3c476 100755 --- a/bridges/scripts/verify-pallets-build.sh +++ b/bridges/scripts/verify-pallets-build.sh @@ -68,6 +68,7 @@ rm -rf $BRIDGES_FOLDER/modules/beefy rm -rf $BRIDGES_FOLDER/modules/shift-session-manager rm -rf $BRIDGES_FOLDER/primitives/beefy rm -rf $BRIDGES_FOLDER/relays +rm -rf $BRIDGES_FOLDER/relay-clients rm -rf $BRIDGES_FOLDER/scripts/add_license.sh rm -rf $BRIDGES_FOLDER/scripts/build-containers.sh rm -rf $BRIDGES_FOLDER/scripts/ci-cache.sh @@ -77,6 +78,7 @@ rm -rf $BRIDGES_FOLDER/scripts/regenerate_runtimes.sh rm -rf $BRIDGES_FOLDER/scripts/update-weights.sh rm -rf $BRIDGES_FOLDER/scripts/update-weights-setup.sh rm -rf $BRIDGES_FOLDER/scripts/update_substrate.sh +rm -rf $BRIDGES_FOLDER/substrate-relay rm -rf $BRIDGES_FOLDER/tools rm -f $BRIDGES_FOLDER/.dockerignore rm -f $BRIDGES_FOLDER/local.Dockerfile.dockerignore @@ -89,6 +91,7 @@ rm -f $BRIDGES_FOLDER/local.Dockerfile rm -f $BRIDGES_FOLDER/CODEOWNERS rm -f $BRIDGES_FOLDER/Dockerfile rm -f $BRIDGES_FOLDER/rustfmt.toml +rm -f $BRIDGES_FOLDER/RELEASE.md # let's fix Cargo.toml a bit (it'll be helpful if we are in the bridges repo) if [[ ! -f "Cargo.toml" ]]; then @@ -131,7 +134,7 @@ cargo check -p bridge-runtime-common cargo check -p bridge-runtime-common --features runtime-benchmarks cargo check -p bridge-runtime-common --features integrity-test -# we're removing lock file after all chechs are done. Otherwise we may use different +# we're removing lock file after all checks are done. 
Otherwise we may use different # Substrate/Polkadot/Cumulus commits and our checks will fail rm -f $BRIDGES_FOLDER/Cargo.lock diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index a9e04f4c0259..f89c2a00c1d9 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -35,7 +35,7 @@ sp-io = { path = "../../../../substrate/primitives/io", default-features = false snowbridge-core = { path = "../../primitives/core", default-features = false } snowbridge-ethereum = { path = "../../primitives/ethereum", default-features = false } -snowbridge-pallet-ethereum-client-fixtures = { path = "./fixtures", default-features = false, optional = true } +snowbridge-pallet-ethereum-client-fixtures = { path = "fixtures", default-features = false, optional = true } primitives = { package = "snowbridge-beacon-primitives", path = "../../primitives/beacon", default-features = false } static_assertions = { version = "1.1.0", default-features = false } bp-runtime = { path = "../../../primitives/runtime", default-features = false } @@ -47,7 +47,7 @@ sp-keyring = { path = "../../../../substrate/primitives/keyring" } serde_json = { workspace = true, default-features = true } hex-literal = "0.4.1" pallet-timestamp = { path = "../../../../substrate/frame/timestamp" } -snowbridge-pallet-ethereum-client-fixtures = { path = "./fixtures" } +snowbridge-pallet-ethereum-client-fixtures = { path = "fixtures" } sp-io = { path = "../../../../substrate/primitives/io" } serde = { workspace = true, default-features = true } diff --git a/bridges/snowbridge/pallets/ethereum-client/fixtures/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/fixtures/src/lib.rs index cbf44ad3d05f..a28b438cc1b2 100644 --- a/bridges/snowbridge/pallets/ethereum-client/fixtures/src/lib.rs +++ b/bridges/snowbridge/pallets/ethereum-client/fixtures/src/lib.rs @@ -10,9 +10,10 @@ extern crate alloc; use alloc::{boxed::Box, vec}; use hex_literal::hex; use snowbridge_beacon_primitives::{ - types::deneb, updates::AncestryProof, BeaconHeader, ExecutionHeaderUpdate, - NextSyncCommitteeUpdate, SyncAggregate, SyncCommittee, VersionedExecutionPayloadHeader, + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, NextSyncCommitteeUpdate, + SyncAggregate, SyncCommittee, VersionedExecutionPayloadHeader, }; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; use sp_core::U256; const SC_SIZE: usize = 512; @@ -23,11 +24,11 @@ type Update = snowbridge_beacon_primitives::Update; pub fn make_checkpoint() -> Box { Box::new(CheckpointUpdate { header: BeaconHeader { - slot: 2496, - proposer_index: 2, - parent_root: hex!("c99e49787106733eeebab4d93eb326e1f2214575c9d928f0c4ab0da0776f1622").into(), - state_root: hex!("fbf8a08c86ef36bd173e37e733da4a78aa8e85fee99a990e858dd12a59087fde").into(), - body_root: hex!("a2a8ad06901447b2807a9059580a4c40d8a941f325b1343c69f7c7c6c90e4ab0").into(), + slot: 864, + proposer_index: 4, + parent_root: hex!("614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614").into(), + state_root: hex!("5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a").into(), + body_root: hex!("0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e").into(), }, current_sync_committee: SyncCommittee { pubkeys: [ @@ -547,20 +548,20 @@ pub fn make_checkpoint() -> Box { aggregate_pubkey: 
hex!("8fbd66eeec2ff69ef0b836f04b1d67d88bcd4dfd495061964ad757c77abe822a39fa1cd8ed0d4d9bc9276cea73fd745c").into(), }, current_sync_committee_branch: vec![ - hex!("3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59").into(), - hex!("93880225bf99a0c5ec22b266ff829837754e9c5edf37a68c05b8f803fd82fa45").into(), - hex!("4c60656ec9a95fcf11030ad309c716b5b15beb7f60a0bcfc7c9d4eff505472ff").into(), - hex!("22d1645fceb4bf9a695043dda19a53e784ec70df6a6b1bd66ea30eba1cca5f2f").into(), - hex!("a8fc6cad84ceefc633ec56c2d031d525e1cb4b51c70eb252919fce5bba9a1fde").into(), + hex!("3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59").into(), + hex!("a9e90f89e7f90fd5d79a6bbcaf40ba5cfc05ab1b561ac51c84867c32248d5b1e").into(), + hex!("bd1a76b03e02402bb24a627de1980a80ab17691980271f597b844b89b497ef75").into(), + hex!("07bbcd27c7cad089023db046eda17e8209842b7d97add8b873519e84fe6480e7").into(), + hex!("94c11eeee4cb6192bf40810f23486d8c75dfbc2b6f28d988d6f74435ede243b0").into(), ], validators_root: hex!("270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69").into(), - block_roots_root: hex!("d160b7687041891b73e54b06fc4e04f82d0fa8fdd76705895e216c6b24709dfe").into(), + block_roots_root: hex!("b9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10").into(), block_roots_branch: vec![ - hex!("105290e42d98ab6a0ada6e55453cede36c672abf645eeb986b88d7487616e135").into(), - hex!("9da41f274bcdf6122335443d9ce94d07163b48dba3e2f9499ff56f4e48b48b99").into(), - hex!("ecea7e1d3152d8130e83afdfe34b4de4ba2b69a33c9471991096daf454de9cf5").into(), - hex!("b2bf1758e50b2bfff29169fbc70fdb884b2b05bb615dbc53567574da6f4f1ae2").into(), - hex!("cd87069daf70975779126d6af833b7d636c75ca4d5e750ebcad0e76408a5e5bf").into(), + hex!("733422bd810895dab74cbbe07c69dd440cbb51f573181ad4dddac30fcdd0f41f").into(), + hex!("9b9eca73ab01d14549c325ba1b4610bb20bf1f8ec2dbd649f9d8cc7f3cea75fa").into(), + hex!("bcc666ad0ad9f9725cbd682bc95589d35b1b53b2a615f1e6e8dd5e086336becf").into(), + hex!("3069b547a08f703a1715016e926cbd64e71f93f64fb68d98d8c8f1ab745c46e5").into(), + hex!("c2de7e1097239404e17b263cfa0473533cc41e903cb03440d633bc5c27314cb4").into(), ], }) } @@ -570,13 +571,13 @@ pub fn make_sync_committee_update() -> Box { attested_header: BeaconHeader { slot: 129, proposer_index: 5, - parent_root: hex!("e32b6c18f029e755b0273dc1c4fa2bc4979794c8286ad40276c1b8a8e36049d8").into(), - state_root: hex!("5ec9dacf25a5f09f20be0c59246b3d8dcfe64bd085b4bac5cec180690339801e").into(), - body_root: hex!("4080cf2412d6ff77fc3164ad6155423a7112f207f173145ec16371a93f481f87").into(), + parent_root: hex!("c2def03fe44a2802130ca1a6d8406e4ccf4f344fec7075d4d84431cd4a8b0904").into(), + state_root: hex!("fa62cde6666add7353d7aedcb61ebe3c6c84b5361e34f814825b1250affb5be4").into(), + body_root: hex!("0f9c69f243fe7b5fa5860396c66c720a9e8b1e526e7914188930497cc4a9134c").into(), }, sync_aggregate: SyncAggregate{ sync_committee_bits: hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), - sync_committee_signature: hex!("a761c3333fbb3d36bc8f65454f898da38001499dcd37494cf3d86940a995399ae649216ba4c985af154f83f72c8b1856079b7636a7a8d7d3f7602df2cbf699edb72b65253e82de4d9cc4db7377eafb22f799129f63f094a21c00675bdd5cc243").into(), + sync_committee_signature: hex!("810cfde2afea3e276256c09bdf1cd321c33dcadeefddcfd24f488e6f756d917cfda90b5b437b3a4b4ef880985afa28a40cf565ec0a82877ddee36adc01d55d9d4a911ae3e22556e4c2636f1c707366fba019fb49450440fcd263d0b054b04bf0").into(), }, signature_slot: 130, 
next_sync_committee_update: Some(NextSyncCommitteeUpdate { @@ -1099,34 +1100,34 @@ pub fn make_sync_committee_update() -> Box { }, next_sync_committee_branch: vec![ hex!("3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59").into(), - hex!("fd1e5ff5d4a15081efe3ff17857b1f95984c9a271b1c41c2f81f43e60c2cc541").into(), - hex!("e1c97f93bb7352d395d1ff8ee29881572cb7eb5d71634783701171dcd30cd93d").into(), - hex!("77fa2170ddbd89b15dae02f2e6cf9f76c8e00d1c4217320acffbe01576d0da61").into(), - hex!("e97288e0627219087a024078d69445f34f0583a6350a7c3c40c39fd1fa6f8d68").into(), + hex!("43276bee17fc9fba3f4866e902f0e5b5b308d79db91154bb8bf819973837a7d9").into(), + hex!("5572348e13ce59446ca0ea7cfeed07579da05f121920a76559e19bda94dd81cd").into(), + hex!("2d58adca9f3c742530de037f1933d6de1920ea4b68581613d4bc32b71547f221").into(), + hex!("7072b3c6577cd5a89b3234968f316f54630bb97eafbdb59e5b61637a9640255f").into(), ], }), finalized_header: BeaconHeader{ slot: 64, proposer_index: 4, - parent_root: hex!("0f7bc2353778c14c7f6dba0fc5fe6eec87228b0d3a5447b61dce67b4d9338de3").into(), - state_root: hex!("feb990de653ce494c0a263f820eaf05a9300dbdc30cb6065ede602827bfccde4").into(), - body_root: hex!("f5235cd8c24f2695fc5b7989926305c10ad8cf5a87d62a739f675f5543df2ec1").into(), + parent_root: hex!("a876486aaad7ddb897f369fd22d0a9903cd61d00c9e0dfe7998dd68d1008c678").into(), + state_root: hex!("818e21c3388575f8ccc9ff17ec79d5a57915bcd31bccf47770f65a18e068416b").into(), + body_root: hex!("1d1f73b864b3bb7e11ff91b56ca1381e0f9ca8122b2c542db88243604c763019").into(), }, finality_branch: vec![ hex!("0200000000000000000000000000000000000000000000000000000000000000").into(), hex!("10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7").into(), hex!("98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d").into(), - hex!("e1c97f93bb7352d395d1ff8ee29881572cb7eb5d71634783701171dcd30cd93d").into(), - hex!("77fa2170ddbd89b15dae02f2e6cf9f76c8e00d1c4217320acffbe01576d0da61").into(), - hex!("e97288e0627219087a024078d69445f34f0583a6350a7c3c40c39fd1fa6f8d68").into(), + hex!("5572348e13ce59446ca0ea7cfeed07579da05f121920a76559e19bda94dd81cd").into(), + hex!("2d58adca9f3c742530de037f1933d6de1920ea4b68581613d4bc32b71547f221").into(), + hex!("7072b3c6577cd5a89b3234968f316f54630bb97eafbdb59e5b61637a9640255f").into(), ], - block_roots_root: hex!("6fcdfd1c3fb1bdd421fe59dddfff3855b5ed5e30373887991a0059d019ad12bc").into(), + block_roots_root: hex!("715b08694bef183a6d94b3113d16a7129f89fc3edec85a7e0eaf6ef9153552ef").into(), block_roots_branch: vec![ - hex!("94b59531f172bc24f914bc0c10104ccb158676850f8cc3b47b6ddb7f096ebdd7").into(), - hex!("22470ed9155a938587d44d5fa19217c0f939d8862e504e67cd8cb4d1b960795e").into(), - hex!("feec3ef1a68f93849e71e84f90b99602cccc31868137b6887ca8244a4b979e8e").into(), + hex!("4028c72c71b6ce80ea7d18b2c9471f4e4fa39746261a9921e832a4a2f9bdf7bb").into(), + hex!("75f98062661785d3290b7bd998b64446582baa49210733fd4603e1a97cd45a44").into(), + hex!("6fb757f44052f30c464810f01b0132adfa1a5446d8715b41e9af88eee1ee3e65").into(), hex!("5340ad5877c72dca689ca04bc8fedb78d67a4801d99887937edd8ccd29f87e82").into(), - hex!("f5ff4b0c6190005015889879568f5f0d9c40134c7ec4ffdda47950dcd92395ad").into(), + hex!("f2b3cb56753939a728ccad399a434ca490f018f2f331529ec0d8b2d59c509271").into(), ], }) } @@ -1134,95 +1135,180 @@ pub fn make_sync_committee_update() -> Box { pub fn make_finalized_header_update() -> Box { Box::new(Update { attested_header: BeaconHeader { - slot: 2566, - proposer_index: 6, - parent_root: 
hex!("6eb9f13a2c496318ce1ab3087bbd872f5c9519a1a7ca8231a2453e3cb523af00").into(), - state_root: hex!("c8cb12766113dff7e46d2917267bf33d0626d99dd47715fcdbc5c65fad3c04b4").into(), - body_root: hex!("d8cfd0d7bc9bc3724417a1655bb0a67c0765ca36197320f4d834150b52ef1420").into(), + slot: 933, + proposer_index: 1, + parent_root: hex!("f5fc63e2780ca302b97aea73fc95d74d702b5afe9a772c2b68f695026337b620").into(), + state_root: hex!("d856d11636bc4d866e78be9e747b222b0977556a367ab42e4085277301438050").into(), + body_root: hex!("5689091ab4eb76c2e876271add4924e1c66ce987c300c24aac2ad8c703e9a33f").into(), }, sync_aggregate: SyncAggregate{ sync_committee_bits: hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), - sync_committee_signature: hex!("9296f9a0387f2cac47008e22ad7c3cd3d49d35384c13e6aa1eacca7dca7c3d2ca81515e50eb3396b9550ed20ef7d8fa2049a186598feb2c00e93728045fcff917733d1827481b8fc95f3913e27fc70112c2490496eb57bb7181f02c3f9fd471f").into(), + sync_committee_signature: hex!("93a3d482fe2a2f7fd2b634169752a8fddf1dc28b23a020b398be8526faf37a74ca0f6db1bed78a9c7256c09a6115235e108e0e8a7ce09287317b0856c4b77dfa5adba6cf4c3ebea5bfa4cd2fcde80fd0a532f2defe65d530201d5d2258796559").into(), }, - signature_slot: 2567, + signature_slot: 934, next_sync_committee_update: None, finalized_header: BeaconHeader { - slot: 2496, - proposer_index: 2, - parent_root: hex!("c99e49787106733eeebab4d93eb326e1f2214575c9d928f0c4ab0da0776f1622").into(), - state_root: hex!("fbf8a08c86ef36bd173e37e733da4a78aa8e85fee99a990e858dd12a59087fde").into(), - body_root: hex!("a2a8ad06901447b2807a9059580a4c40d8a941f325b1343c69f7c7c6c90e4ab0").into(), + slot: 864, + proposer_index: 4, + parent_root: hex!("614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614").into(), + state_root: hex!("5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a").into(), + body_root: hex!("0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e").into(), }, finality_branch: vec![ - hex!("4e00000000000000000000000000000000000000000000000000000000000000").into(), + hex!("1b00000000000000000000000000000000000000000000000000000000000000").into(), hex!("10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7").into(), hex!("98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d").into(), - hex!("958b8e43347f6df6fa5eb3d62d06a862381a6585aa40640dd1c0de11f1cf89c1").into(), - hex!("f107dce04faa86a28fc5d4a618be9cb8d4fc3c23d6c42c3624f3ff4bf6586a03").into(), - hex!("a501cdc02e86969ac3e4d0c5a36f4f049efaa1ab8cb6693f51d130eb52a80f30").into(), + hex!("f12d9aededc72724e417b518fe6f847684f26f81616243dedf8c551cc7d504f5").into(), + hex!("89a85d0907ab3fd6e00ae385f61d456c6191646404ae7b8d23d0e60440cf4d00").into(), + hex!("9fc943b6020eb61d780d78bcc6f6102a81d2c868d58f36e61c6e286a2dc4d8c2").into(), ], - block_roots_root: hex!("d160b7687041891b73e54b06fc4e04f82d0fa8fdd76705895e216c6b24709dfe").into(), + block_roots_root: hex!("b9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10").into(), block_roots_branch: vec![ - hex!("105290e42d98ab6a0ada6e55453cede36c672abf645eeb986b88d7487616e135").into(), - hex!("9da41f274bcdf6122335443d9ce94d07163b48dba3e2f9499ff56f4e48b48b99").into(), - hex!("ecea7e1d3152d8130e83afdfe34b4de4ba2b69a33c9471991096daf454de9cf5").into(), - hex!("b2bf1758e50b2bfff29169fbc70fdb884b2b05bb615dbc53567574da6f4f1ae2").into(), - hex!("cd87069daf70975779126d6af833b7d636c75ca4d5e750ebcad0e76408a5e5bf").into(), + 
hex!("733422bd810895dab74cbbe07c69dd440cbb51f573181ad4dddac30fcdd0f41f").into(), + hex!("9b9eca73ab01d14549c325ba1b4610bb20bf1f8ec2dbd649f9d8cc7f3cea75fa").into(), + hex!("bcc666ad0ad9f9725cbd682bc95589d35b1b53b2a615f1e6e8dd5e086336becf").into(), + hex!("3069b547a08f703a1715016e926cbd64e71f93f64fb68d98d8c8f1ab745c46e5").into(), + hex!("c2de7e1097239404e17b263cfa0473533cc41e903cb03440d633bc5c27314cb4").into(), ] }) } -pub fn make_execution_header_update() -> Box { - Box::new(ExecutionHeaderUpdate { +pub fn make_execution_proof() -> Box { + Box::new(ExecutionProof { header: BeaconHeader { - slot: 215, - proposer_index: 2, - parent_root: hex!("97518f531a252bb6ca547b21aca9da767943ec99211d3b15c804e34c3a523f45").into(), - state_root: hex!("b088b5a3a8c90d6dc919a695cd7bb0267c6f983ea2e675c559ceb8f46cb90b67").into(), - body_root: hex!("0ba23c8224fdd01531d5ad51486353bd524a0b4c20bca704e26d3210616f829b").into(), + slot: 393, + proposer_index: 4, + parent_root: hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + state_root: hex!("b62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434").into(), + body_root: hex!("04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db").into(), }, ancestry_proof: Some(AncestryProof { header_branch: vec![ - hex!("97518f531a252bb6ca547b21aca9da767943ec99211d3b15c804e34c3a523f45").into(), - hex!("5ce0db996bd499c2b4f7a93263d5aafd052f420efb617cce6fdd54e25516aa45").into(), - hex!("84f0e373b66011ce774c7061440c0a50a51cce2b4b335395eee3e563d605597f").into(), - hex!("48f9ccc5f9594142c18c3b5c39a99f0549329c6ab3ba06c9a50030eadca87770").into(), - hex!("f89d6e311e05bc75a6f63ce118bccce254551f1a88d54c3b4f773f81f946bd99").into(), - hex!("2edd6d893c22636675147c07dfcdb541a146e87c3f15b51c388be4868246dc9b").into(), - hex!("d76b7de5f856e3208a91a42c9c398a7f4fab35e667bf916346050ae742514a2d").into(), - hex!("83a2e233e76385953ca41de4c3afe60471a61f0cc1b3846b4a0670e3e563b747").into(), - hex!("e783a5a109c2ad74e4eb53e8f6b11b31266a92a9e16c1fd5873109c5d41b282c").into(), - hex!("d4ea1ef3869ee6a0fd0b19d7d70027d144eecd4f1d32cbf47632a0a9069164b9").into(), - hex!("f8179564b58eb93a850d35e4156a04db651106442ad891c3e85155c1762792f1").into(), - hex!("4cbb1edb48cf1e32fb30db60aaaeaf6190ffe4d0c8dbc96cec307daecb78be12").into(), + hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + hex!("fa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3").into(), + hex!("cadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d").into(), + hex!("33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c").into(), + hex!("2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf").into(), + hex!("e1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1").into(), + hex!("aa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97").into(), + hex!("160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f").into(), + hex!("f68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535").into(), + hex!("1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), ], - finalized_block_root: hex!("890a7f23b9ed2160901654be9efc575d6830ca860e2a97866ae3423fb7bd7231").into(), + finalized_block_root: 
hex!("751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46").into(), }), execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { - parent_hash: hex!("d82ec63f5c5e6ba61d62f09c188f158e6449b94bdcc31941e68639eec3c4cf7a").into(), + parent_hash: hex!("8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2").into(), fee_recipient: hex!("0000000000000000000000000000000000000000").into(), - state_root: hex!("8b65545fe5f3216b47b6339b9c91ca2b7f1032a970b04246d9e9fb4460ee34c3").into(), - receipts_root: hex!("7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095").into(), - logs_bloom: hex!("00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010").into(), - prev_randao: hex!("6d9e2a012d82b1b6cb0a2c1c1ed24cc16dbb56e6e39ae545371e0666ab057862").into(), - block_number: 215, - gas_limit: 64842908, - gas_used: 119301, - timestamp: 1705859527, - extra_data: hex!("d983010d0a846765746888676f312e32312e358664617277696e").into(), + state_root: hex!("96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b").into(), + receipts_root: hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010").into(), + prev_randao: hex!("62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67").into(), + block_number: 393, + gas_limit: 54492273, + gas_used: 199644, + timestamp: 1710552813, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), base_fee_per_gas: U256::from(7u64), - block_hash: hex!("48498dbfbcfae53a7f4c289ee00747aceea925f6260c50ead5a33e1c55c40f98").into(), - transactions_root: hex!("5ebc1347fe3df0611d4f66b19bd8e1c6f4eaed0371d850f14c83b1c77ea234e6").into(), + block_hash: hex!("6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131").into(), + transactions_root: hex!("2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d").into(), withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), blob_gas_used: 0, excess_blob_gas: 0, }), execution_branch: vec![ - hex!("f8c69d3830406d668619bcccc13c8dddde41e863326f7418b241d5924c4ad34a").into(), + hex!("a6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d").into(), hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), - hex!("f4d6b5cf9c6e212615c3674fa625d04eb1114153fb221ef5ad02aa433fc67cfb").into(), + hex!("d3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da").into(), ], }) } + +pub fn 
make_inbound_fixture() -> InboundQueueFixture { + InboundQueueFixture { + message: Message { + event_log: Log { + address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), + topics: vec![ + hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), + hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), + hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), + ], + data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").into(), + }, + proof: Proof { + receipt_proof: (vec![ + hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").to_vec(), + hex!("4a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f").to_vec(), + ], vec![ + hex!("f851a09c01dd6d2d8de951c45af23d3ad00829ce021c04d6c8acbe1612d456ee320d4980808080808080a04a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f8080808080808080").to_vec(), + hex!("f9028c30b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").to_vec(), + ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 393, + proposer_index: 4, + parent_root: hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + state_root: hex!("b62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434").into(), + body_root: hex!("04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + hex!("fa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3").into(), + hex!("cadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d").into(), + hex!("33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c").into(), + hex!("2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf").into(), + hex!("e1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1").into(), + 
hex!("aa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97").into(), + hex!("160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f").into(), + hex!("f68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535").into(), + hex!("1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), + hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), + ], + finalized_block_root: hex!("751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b").into(), + receipts_root: hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010").into(), + prev_randao: hex!("62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67").into(), + block_number: 393, + gas_limit: 54492273, + gas_used: 199644, + timestamp: 1710552813, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131").into(), + transactions_root: hex!("2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("a6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("d3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da").into(), + ], + } + }, + }, + finalized_header: BeaconHeader { + slot: 864, + proposer_index: 4, + parent_root: hex!("614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614").into(), + state_root: hex!("5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a").into(), + body_root: hex!("0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e").into(), + }, + block_roots_root: hex!("b9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10").into(), + } +} diff --git a/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs b/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs index e1520cd71539..4b8796b628d7 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs @@ -65,24 +65,6 @@ mod 
benchmarks { Ok(()) } - #[benchmark] - fn submit_execution_header() -> Result<(), BenchmarkError> { - let caller: T::AccountId = whitelisted_caller(); - let checkpoint_update = make_checkpoint(); - let finalized_header_update = make_finalized_header_update(); - let execution_header_update = make_execution_header_update(); - let execution_header_hash = execution_header_update.execution_header.block_hash(); - EthereumBeaconClient::::process_checkpoint_update(&checkpoint_update)?; - EthereumBeaconClient::::process_update(&finalized_header_update)?; - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), Box::new(*execution_header_update)); - - assert!(>::contains_key(execution_header_hash)); - - Ok(()) - } - #[benchmark(extra)] fn bls_fast_aggregate_verify_pre_aggregated() -> Result<(), BenchmarkError> { EthereumBeaconClient::::process_checkpoint_update(&make_checkpoint())?; diff --git a/bridges/snowbridge/pallets/ethereum-client/src/impls.rs b/bridges/snowbridge/pallets/ethereum-client/src/impls.rs index 5b1c09bfb400..cbce53ad1775 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/impls.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/impls.rs @@ -1,6 +1,8 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork use super::*; +use frame_support::ensure; +use primitives::ExecutionProof; use snowbridge_core::inbound::{ VerificationError::*, @@ -14,32 +16,13 @@ impl Verifier for Pallet { /// the log should be in the beacon client storage, meaning it has been verified and is an /// ancestor of a finalized beacon block. fn verify(event_log: &Log, proof: &Proof) -> Result<(), VerificationError> { - log::info!( - target: "ethereum-client", - "💫 Verifying message with block hash {}", - proof.block_hash, - ); + Self::verify_execution_proof(&proof.execution_proof) + .map_err(|e| InvalidExecutionProof(e.into()))?; - let header = >::get(proof.block_hash).ok_or(HeaderNotFound)?; - - let receipt = match Self::verify_receipt_inclusion(header.receipts_root, proof) { - Ok(receipt) => receipt, - Err(err) => { - log::error!( - target: "ethereum-client", - "💫 Verification of receipt inclusion failed for block {}: {:?}", - proof.block_hash, - err - ); - return Err(err) - }, - }; - - log::trace!( - target: "ethereum-client", - "💫 Verified receipt inclusion for transaction at index {} in block {}", - proof.tx_index, proof.block_hash, - ); + let receipt = Self::verify_receipt_inclusion( + proof.execution_proof.execution_header.receipts_root(), + &proof.receipt_proof.1, + )?; event_log.validate().map_err(|_| InvalidLog)?; @@ -53,18 +36,11 @@ impl Verifier for Pallet { if !receipt.contains_log(&event_log) { log::error!( target: "ethereum-client", - "💫 Event log not found in receipt for transaction at index {} in block {}", - proof.tx_index, proof.block_hash, + "💫 Event log not found in receipt for transaction", ); return Err(LogNotFound) } - log::info!( - target: "ethereum-client", - "💫 Receipt verification successful for {}", - proof.block_hash, - ); - Ok(()) } } @@ -74,9 +50,9 @@ impl Pallet { /// `proof.block_hash`. 
pub fn verify_receipt_inclusion( receipts_root: H256, - proof: &Proof, + receipt_proof: &[Vec<u8>], ) -> Result<Receipt, VerificationError> { - let result = verify_receipt_proof(receipts_root, &proof.data.1).ok_or(InvalidProof)?; + let result = verify_receipt_proof(receipts_root, receipt_proof).ok_or(InvalidProof)?; match result { Ok(receipt) => Ok(receipt), @@ -90,4 +66,96 @@ impl<T: Config> Pallet<T> { }, } } + + /// Validates an execution header with ancestry_proof against a finalized checkpoint on + /// chain. The beacon header containing the execution header is sent, plus the execution header, + /// along with a proof that the execution header is rooted in the beacon header body. + pub(crate) fn verify_execution_proof(execution_proof: &ExecutionProof) -> DispatchResult { + let latest_finalized_state = + FinalizedBeaconState::<T>::get(LatestFinalizedBlockRoot::<T>::get()) + .ok_or(Error::<T>::NotBootstrapped)?; + // Checks that the header is an ancestor of a finalized header, using slot number. + ensure!( + execution_proof.header.slot <= latest_finalized_state.slot, + Error::<T>::HeaderNotFinalized + ); + + // Gets the hash tree root of the execution header, in preparation for the execution + // header proof (used to check that the execution header is rooted in the beacon + // header body). + let execution_header_root: H256 = execution_proof + .execution_header + .hash_tree_root() + .map_err(|_| Error::<T>::BlockBodyHashTreeRootFailed)?; + + ensure!( + verify_merkle_branch( + execution_header_root, + &execution_proof.execution_branch, + config::EXECUTION_HEADER_SUBTREE_INDEX, + config::EXECUTION_HEADER_DEPTH, + execution_proof.header.body_root + ), + Error::<T>::InvalidExecutionHeaderProof + ); + + let beacon_block_root: H256 = execution_proof + .header + .hash_tree_root() + .map_err(|_| Error::<T>::HeaderHashTreeRootFailed)?; + + match &execution_proof.ancestry_proof { + Some(proof) => { + Self::verify_ancestry_proof( + beacon_block_root, + execution_proof.header.slot, + &proof.header_branch, + proof.finalized_block_root, + )?; + }, + None => { + // If the ancestry proof is not provided, we expect this beacon header to be a + // finalized beacon header. We need to check that the header hash matches the + // finalized header root at the expected slot. + let state = <FinalizedBeaconState<T>>::get(beacon_block_root) + .ok_or(Error::<T>::ExpectedFinalizedHeaderNotStored)?; + if execution_proof.header.slot != state.slot { + return Err(Error::<T>::ExpectedFinalizedHeaderNotStored.into()) + } + }, + } + + Ok(()) + } + + /// Verify that `block_root` is an ancestor of `finalized_block_root`. Used to prove that + /// an execution header is an ancestor of a finalized header (i.e. the blocks are + /// on the same chain).
+ fn verify_ancestry_proof( + block_root: H256, + block_slot: u64, + block_root_proof: &[H256], + finalized_block_root: H256, + ) -> DispatchResult { + let state = >::get(finalized_block_root) + .ok_or(Error::::ExpectedFinalizedHeaderNotStored)?; + + ensure!(block_slot < state.slot, Error::::HeaderNotFinalized); + + let index_in_array = block_slot % (SLOTS_PER_HISTORICAL_ROOT as u64); + let leaf_index = (SLOTS_PER_HISTORICAL_ROOT as u64) + index_in_array; + + ensure!( + verify_merkle_branch( + block_root, + block_root_proof, + leaf_index as usize, + config::BLOCK_ROOT_AT_INDEX_DEPTH, + state.block_roots_root + ), + Error::::InvalidAncestryMerkleProof + ); + + Ok(()) + } } diff --git a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs index bd99a00680be..c9cac8bb35a0 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs @@ -10,13 +10,11 @@ //! //! * [`Call::force_checkpoint`]: Set the initial trusted consensus checkpoint. //! * [`Call::set_operating_mode`]: Set the operating mode of the pallet. Can be used to disable -//! processing of conensus updates. +//! processing of consensus updates. //! //! ## Consensus Updates //! //! * [`Call::submit`]: Submit a finalized beacon header with an optional sync committee update -//! * [`Call::submit_execution_header`]: Submit an execution header together with an ancestry proof -//! that can be verified against an already imported finalized beacon header. #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; @@ -42,8 +40,7 @@ use frame_support::{ use frame_system::ensure_signed; use primitives::{ fast_aggregate_verify, verify_merkle_branch, verify_receipt_proof, BeaconHeader, BlsError, - CompactBeaconState, CompactExecutionHeader, ExecutionHeaderState, ForkData, ForkVersion, - ForkVersions, PublicKeyPrepared, SigningData, + CompactBeaconState, ForkData, ForkVersion, ForkVersions, PublicKeyPrepared, SigningData, }; use snowbridge_core::{BasicOperatingMode, RingBufferMap}; use sp_core::H256; @@ -53,11 +50,7 @@ pub use weights::WeightInfo; use functions::{ compute_epoch, compute_period, decompress_sync_committee_bits, sync_committee_sum, }; -pub use types::ExecutionHeaderBuffer; -use types::{ - CheckpointUpdate, ExecutionHeaderUpdate, FinalizedBeaconStateBuffer, SyncCommitteePrepared, - Update, -}; +use types::{CheckpointUpdate, FinalizedBeaconStateBuffer, SyncCommitteePrepared, Update}; pub use pallet::*; @@ -78,10 +71,7 @@ pub mod pallet { pub struct MaxFinalizedHeadersToKeep(PhantomData); impl Get for MaxFinalizedHeadersToKeep { fn get() -> u32 { - // Consider max latency allowed between LatestFinalizedState and LatestExecutionState is - // the total slots in one sync_committee_period so 1 should be fine we keep 2 periods - // here for redundancy. 
diff --git a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
index bd99a00680be..c9cac8bb35a0 100644
--- a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
+++ b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
@@ -10,13 +10,11 @@
 //!
 //! * [`Call::force_checkpoint`]: Set the initial trusted consensus checkpoint.
 //! * [`Call::set_operating_mode`]: Set the operating mode of the pallet. Can be used to disable
-//!   processing of conensus updates.
+//!   processing of consensus updates.
 //!
 //! ## Consensus Updates
 //!
 //! * [`Call::submit`]: Submit a finalized beacon header with an optional sync committee update
-//! * [`Call::submit_execution_header`]: Submit an execution header together with an ancestry proof
-//!   that can be verified against an already imported finalized beacon header.
 #![cfg_attr(not(feature = "std"), no_std)]

 extern crate alloc;
@@ -42,8 +40,7 @@ use frame_support::{
 use frame_system::ensure_signed;
 use primitives::{
 	fast_aggregate_verify, verify_merkle_branch, verify_receipt_proof, BeaconHeader, BlsError,
-	CompactBeaconState, CompactExecutionHeader, ExecutionHeaderState, ForkData, ForkVersion,
-	ForkVersions, PublicKeyPrepared, SigningData,
+	CompactBeaconState, ForkData, ForkVersion, ForkVersions, PublicKeyPrepared, SigningData,
 };
 use snowbridge_core::{BasicOperatingMode, RingBufferMap};
 use sp_core::H256;
@@ -53,11 +50,7 @@ pub use weights::WeightInfo;
 use functions::{
 	compute_epoch, compute_period, decompress_sync_committee_bits, sync_committee_sum,
 };
-pub use types::ExecutionHeaderBuffer;
-use types::{
-	CheckpointUpdate, ExecutionHeaderUpdate, FinalizedBeaconStateBuffer, SyncCommitteePrepared,
-	Update,
-};
+use types::{CheckpointUpdate, FinalizedBeaconStateBuffer, SyncCommitteePrepared, Update};

 pub use pallet::*;

@@ -78,10 +71,7 @@ pub mod pallet {
 	pub struct MaxFinalizedHeadersToKeep<T: Config>(PhantomData<T>);
 	impl<T: Config> Get<u32> for MaxFinalizedHeadersToKeep<T> {
 		fn get() -> u32 {
-			// Consider max latency allowed between LatestFinalizedState and LatestExecutionState is
-			// the total slots in one sync_committee_period so 1 should be fine we keep 2 periods
-			// here for redundancy.
-			const MAX_REDUNDANCY: u32 = 2;
+			const MAX_REDUNDANCY: u32 = 20;
 			config::EPOCHS_PER_SYNC_COMMITTEE_PERIOD as u32 * MAX_REDUNDANCY
 		}
 	}
@@ -94,9 +84,6 @@ pub mod pallet {
 		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
 		#[pallet::constant]
 		type ForkVersions: Get<ForkVersions>;
-		/// Maximum number of execution headers to keep
-		#[pallet::constant]
-		type MaxExecutionHeadersToKeep: Get<u32>;
 		type WeightInfo: WeightInfo;
 	}
@@ -107,10 +94,6 @@ pub mod pallet {
 			block_hash: H256,
 			slot: u64,
 		},
-		ExecutionHeaderImported {
-			block_hash: H256,
-			block_number: u64,
-		},
 		SyncCommitteeUpdated {
 			period: u64,
 		},
@@ -132,6 +115,10 @@
 		InvalidExecutionHeaderProof,
 		InvalidAncestryMerkleProof,
 		InvalidBlockRootsRootMerkleProof,
+		/// The gap between the finalized headers is larger than the sync committee period,
+		/// rendering execution headers unprovable using ancestry proofs (the block roots
+		/// array is the same size as the slots in a sync committee period).
+		InvalidFinalizedHeaderGap,
 		HeaderNotFinalized,
 		BlockBodyHashTreeRootFailed,
 		HeaderHashTreeRootFailed,
@@ -189,25 +176,6 @@ pub mod pallet {
 	pub(super) type NextSyncCommittee<T: Config> = StorageValue<_, SyncCommitteePrepared, ValueQuery>;

-	/// Latest imported execution header
-	#[pallet::storage]
-	#[pallet::getter(fn latest_execution_state)]
-	pub(super) type LatestExecutionState<T: Config> =
-		StorageValue<_, ExecutionHeaderState, ValueQuery>;
-
-	/// Execution Headers
-	#[pallet::storage]
-	pub type ExecutionHeaders<T: Config> =
-		StorageMap<_, Identity, H256, CompactExecutionHeader, OptionQuery>;
-
-	/// Execution Headers: Current position in ring buffer
-	#[pallet::storage]
-	pub type ExecutionHeaderIndex<T: Config> = StorageValue<_, u32, ValueQuery>;
-
-	/// Execution Headers: Mapping of ring buffer index to a pruning candidate
-	#[pallet::storage]
-	pub type ExecutionHeaderMapping<T: Config> = StorageMap<_, Identity, u32, H256, ValueQuery>;
-
 	/// The current operating mode of the pallet.
 	#[pallet::storage]
 	#[pallet::getter(fn operating_mode)]
@@ -246,21 +214,6 @@ pub mod pallet {
 			Ok(())
 		}

-		#[pallet::call_index(2)]
-		#[pallet::weight(T::WeightInfo::submit_execution_header())]
-		#[transactional]
-		/// Submits a new execution header update. The relevant related beacon header
-		/// is also included to prove the execution header, as well as ancestry proof data.
-		pub fn submit_execution_header(
-			origin: OriginFor<T>,
-			update: Box<ExecutionHeaderUpdate>,
-		) -> DispatchResult {
-			ensure_signed(origin)?;
-			ensure!(!Self::operating_mode().is_halted(), Error::<T>::Halted);
-			Self::process_execution_header_update(&update)?;
-			Ok(())
-		}
-
 		/// Halt or resume all pallet operations. May only be called by root.
 		#[pallet::call_index(3)]
 		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
@@ -323,41 +276,19 @@ pub mod pallet {
 			<CurrentSyncCommittee<T>>::set(sync_committee_prepared);
 			<NextSyncCommittee<T>>::kill();
 			InitialCheckpointRoot::<T>::set(header_root);
-			<LatestExecutionState<T>>::kill();

 			Self::store_validators_root(update.validators_root);
-			Self::store_finalized_header(header_root, update.header, update.block_roots_root)?;
+			Self::store_finalized_header(update.header, update.block_roots_root)?;

 			Ok(())
 		}

 		pub(crate) fn process_update(update: &Update) -> DispatchResult {
-			Self::cross_check_execution_state()?;
 			Self::verify_update(update)?;
 			Self::apply_update(update)?;

 			Ok(())
 		}

-		/// Cross check to make sure that execution header import does not fall too far behind
-		/// finalised beacon header import. If that happens just return an error and pause
-		/// processing until execution header processing has caught up.
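(Editorial aside, not part of the patch.) Two pieces of arithmetic behind this hunk, assuming the mainnet constants of 256 epochs per sync committee period and 32 slots per epoch, which the diff itself does not restate: one sync committee period spans exactly the 8192-slot `block_roots` window, which is why the new `InvalidFinalizedHeaderGap` check added to `verify_update` below compares against `SLOTS_PER_HISTORICAL_ROOT`; and raising `MAX_REDUNDANCY` from 2 to 20 grows the finalized-header ring buffer from 512 to 5120 entries.

// Hedged sketch; the constants below are assumed mainnet values.
const EPOCHS_PER_SYNC_COMMITTEE_PERIOD: u64 = 256;
const SLOTS_PER_EPOCH: u64 = 32;
const SLOTS_PER_HISTORICAL_ROOT: u64 = 8192;

// Mirrors the new gap check: a newly imported finalized header may be at most
// one block_roots window ahead of the previously stored one.
fn gap_ok(latest_finalized_slot: u64, new_finalized_slot: u64) -> bool {
    latest_finalized_slot.saturating_add(SLOTS_PER_HISTORICAL_ROOT) >= new_finalized_slot
}

fn main() {
    // One sync committee period covers exactly the block_roots window.
    assert_eq!(EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH, SLOTS_PER_HISTORICAL_ROOT);
    // Ring-buffer capacity: 2 periods (512 headers) before, 20 periods (5120) after.
    assert_eq!(EPOCHS_PER_SYNC_COMMITTEE_PERIOD * 2, 512);
    assert_eq!(EPOCHS_PER_SYNC_COMMITTEE_PERIOD * 20, 5120);
    // Worked slots from the tests below, starting at checkpoint slot 864:
    assert!(gap_ok(864, 864 + 8192)); // at the limit: accepted
    assert!(!gap_ok(864, 864 + 8193)); // one past: InvalidFinalizedHeaderGap
}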
- pub(crate) fn cross_check_execution_state() -> DispatchResult { - let latest_finalized_state = - FinalizedBeaconState::::get(LatestFinalizedBlockRoot::::get()) - .ok_or(Error::::NotBootstrapped)?; - let latest_execution_state = Self::latest_execution_state(); - // The execution header import should be at least within the slot range of a sync - // committee period. - let max_latency = config::EPOCHS_PER_SYNC_COMMITTEE_PERIOD * config::SLOTS_PER_EPOCH; - ensure!( - latest_execution_state.beacon_slot == 0 || - latest_finalized_state.slot < - latest_execution_state.beacon_slot + max_latency as u64, - Error::::ExecutionHeaderTooFarBehind - ); - Ok(()) - } - /// References and strictly follows /// Verifies that provided next sync committee is valid through a series of checks /// (including checking that a sync committee period isn't skipped and that the header is @@ -400,6 +331,17 @@ pub mod pallet { Error::::IrrelevantUpdate ); + // Verify the finalized header gap between the current finalized header and new imported + // header is not larger than the sync committee period, otherwise we cannot do + // ancestry proofs for execution headers in the gap. + ensure!( + latest_finalized_state + .slot + .saturating_add(config::SLOTS_PER_HISTORICAL_ROOT as u64) >= + update.finalized_header.slot, + Error::::InvalidFinalizedHeaderGap + ); + // Verify that the `finality_branch`, if present, confirms `finalized_header` to match // the finalized checkpoint root saved in the state of `attested_header`. let finalized_block_root: H256 = update @@ -521,127 +463,9 @@ pub mod pallet { }; if update.finalized_header.slot > latest_finalized_state.slot { - let finalized_block_root: H256 = update - .finalized_header - .hash_tree_root() - .map_err(|_| Error::::HeaderHashTreeRootFailed)?; - Self::store_finalized_header( - finalized_block_root, - update.finalized_header, - update.block_roots_root, - )?; - } - - Ok(()) - } - - /// Validates an execution header for import. The beacon header containing the execution - /// header is sent, plus the execution header, along with a proof that the execution header - /// is rooted in the beacon header body. - pub(crate) fn process_execution_header_update( - update: &ExecutionHeaderUpdate, - ) -> DispatchResult { - let latest_finalized_state = - FinalizedBeaconState::::get(LatestFinalizedBlockRoot::::get()) - .ok_or(Error::::NotBootstrapped)?; - // Checks that the header is an ancestor of a finalized header, using slot number. - ensure!( - update.header.slot <= latest_finalized_state.slot, - Error::::HeaderNotFinalized - ); - - // Checks that we don't skip execution headers, they need to be imported sequentially. - let latest_execution_state: ExecutionHeaderState = Self::latest_execution_state(); - ensure!( - latest_execution_state.block_number == 0 || - update.execution_header.block_number() == - latest_execution_state.block_number + 1, - Error::::ExecutionHeaderSkippedBlock - ); - - // Gets the hash tree root of the execution header, in preparation for the execution - // header proof (used to check that the execution header is rooted in the beacon - // header body. 
- let execution_header_root: H256 = update - .execution_header - .hash_tree_root() - .map_err(|_| Error::::BlockBodyHashTreeRootFailed)?; - - ensure!( - verify_merkle_branch( - execution_header_root, - &update.execution_branch, - config::EXECUTION_HEADER_SUBTREE_INDEX, - config::EXECUTION_HEADER_DEPTH, - update.header.body_root - ), - Error::::InvalidExecutionHeaderProof - ); - - let block_root: H256 = update - .header - .hash_tree_root() - .map_err(|_| Error::::HeaderHashTreeRootFailed)?; - - match &update.ancestry_proof { - Some(proof) => { - Self::verify_ancestry_proof( - block_root, - update.header.slot, - &proof.header_branch, - proof.finalized_block_root, - )?; - }, - None => { - // If the ancestry proof is not provided, we expect this header to be a - // finalized header. We need to check that the header hash matches the finalized - // header root at the expected slot. - let state = >::get(block_root) - .ok_or(Error::::ExpectedFinalizedHeaderNotStored)?; - if update.header.slot != state.slot { - return Err(Error::::ExpectedFinalizedHeaderNotStored.into()) - } - }, + Self::store_finalized_header(update.finalized_header, update.block_roots_root)?; } - Self::store_execution_header( - update.execution_header.block_hash(), - update.execution_header.clone().into(), - update.header.slot, - block_root, - ); - - Ok(()) - } - - /// Verify that `block_root` is an ancestor of `finalized_block_root` Used to prove that - /// an execution header is an ancestor of a finalized header (i.e. the blocks are - /// on the same chain). - fn verify_ancestry_proof( - block_root: H256, - block_slot: u64, - block_root_proof: &[H256], - finalized_block_root: H256, - ) -> DispatchResult { - let state = >::get(finalized_block_root) - .ok_or(Error::::ExpectedFinalizedHeaderNotStored)?; - - ensure!(block_slot < state.slot, Error::::HeaderNotFinalized); - - let index_in_array = block_slot % (SLOTS_PER_HISTORICAL_ROOT as u64); - let leaf_index = (SLOTS_PER_HISTORICAL_ROOT as u64) + index_in_array; - - ensure!( - verify_merkle_branch( - block_root, - block_root_proof, - leaf_index as usize, - config::BLOCK_ROOT_AT_INDEX_DEPTH, - state.block_roots_root - ), - Error::::InvalidAncestryMerkleProof - ); - Ok(()) } @@ -666,13 +490,15 @@ pub mod pallet { /// Stores a compacted (slot and block roots root (hash of the `block_roots` beacon state /// field, used for ancestry proof)) beacon state in a ring buffer map, with the header root /// as map key. - fn store_finalized_header( - header_root: H256, + pub fn store_finalized_header( header: BeaconHeader, block_roots_root: H256, ) -> DispatchResult { let slot = header.slot; + let header_root: H256 = + header.hash_tree_root().map_err(|_| Error::::HeaderHashTreeRootFailed)?; + >::insert( header_root, CompactBeaconState { slot: header.slot, block_roots_root }, @@ -691,36 +517,6 @@ pub mod pallet { Ok(()) } - /// Stores the provided execution header in pallet storage. The header is stored - /// in a ring buffer map, with the block hash as map key. The last imported execution - /// header is also kept in storage, for the relayer to check import progress. 
- pub fn store_execution_header( - block_hash: H256, - header: CompactExecutionHeader, - beacon_slot: u64, - beacon_block_root: H256, - ) { - let block_number = header.block_number; - - >::insert(block_hash, header); - - log::trace!( - target: LOG_TARGET, - "💫 Updated latest execution block at {} to number {}.", - block_hash, - block_number - ); - - LatestExecutionState::::mutate(|s| { - s.beacon_block_root = beacon_block_root; - s.beacon_slot = beacon_slot; - s.block_hash = block_hash; - s.block_number = block_number; - }); - - Self::deposit_event(Event::ExecutionHeaderImported { block_hash, block_number }); - } - /// Stores the validators root in storage. Validators root is the hash tree root of all the /// validators at genesis and is used to used to identify the chain that we are on /// (used in conjunction with the fork version). diff --git a/bridges/snowbridge/pallets/ethereum-client/src/mock.rs b/bridges/snowbridge/pallets/ethereum-client/src/mock.rs index 799b14f4773e..bd6144ebd8f9 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/mock.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/mock.rs @@ -2,12 +2,13 @@ // SPDX-FileCopyrightText: 2023 Snowfork use crate as ethereum_beacon_client; use crate::config; -use frame_support::{derive_impl, parameter_types}; -use hex_literal::hex; +use frame_support::{derive_impl, dispatch::DispatchResult, parameter_types}; use pallet_timestamp; -use primitives::{CompactExecutionHeader, Fork, ForkVersions}; +use primitives::{Fork, ForkVersions}; use snowbridge_core::inbound::{Log, Proof}; +use sp_std::default::Default; use std::{fs::File, path::PathBuf}; + type Block = frame_system::mocking::MockBlock; use sp_runtime::BuildStorage; @@ -20,8 +21,8 @@ where serde_json::from_reader(File::open(filepath).unwrap()) } -pub fn load_execution_header_update_fixture() -> primitives::ExecutionHeaderUpdate { - load_fixture("execution-header-update.json".to_string()).unwrap() +pub fn load_execution_proof_fixture() -> primitives::ExecutionProof { + load_fixture("execution-proof.json".to_string()).unwrap() } pub fn load_checkpoint_update_fixture( @@ -50,41 +51,8 @@ pub fn load_next_finalized_header_update_fixture( } pub fn get_message_verification_payload() -> (Log, Proof) { - ( - Log { - address: hex!("ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0").into(), - topics: vec![ - hex!("1b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ad").into(), - hex!("00000000000000000000000000000000000000000000000000000000000003e8").into(), - hex!("0000000000000000000000000000000000000000000000000000000000000001").into(), - ], - data: hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000").into(), - }, - Proof { - block_hash: hex!("05aaa60b0f27cce9e71909508527264b77ee14da7b5bf915fcc4e32715333213").into(), - tx_index: 0, - data: (vec![ - hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb").to_vec(), - hex!("d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c185510").to_vec(), - hex!("b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646").to_vec(), - ], vec![ - 
hex!("f90131a0b601337b3aa10a671caa724eba641e759399979856141d3aea6b6b4ac59b889ba00c7d5dd48be9060221a02fb8fa213860b4c50d47046c8fa65ffaba5737d569e0a094601b62a1086cd9c9cb71a7ebff9e718f3217fd6e837efe4246733c0a196f63a06a4b0dd0aefc37b3c77828c8f07d1b7a2455ceb5dbfd3c77d7d6aeeddc2f7e8ca0d6e8e23142cdd8ec219e1f5d8b56aa18e456702b195deeaa210327284d42ade4a08a313d4c87023005d1ab631bbfe3f5de1e405d0e66d0bef3e033f1e5711b5521a0bf09a5d9a48b10ade82b8d6a5362a15921c8b5228a3487479b467db97411d82fa0f95cccae2a7c572ef3c566503e30bac2b2feb2d2f26eebf6d870dcf7f8cf59cea0d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c1855108080808080808080").to_vec(), - hex!("f851a0b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646a060a634b9280e3a23fb63375e7bbdd9ab07fd379ab6a67e2312bbc112195fa358808080808080808080808080808080").to_vec(), - hex!("f9030820b9030402f90300018301d6e2b9010000000000000800000000000020040008000000000000000000000000400000008000000000000000000000000000000000000000000000000000000000042010000000001000000000000000000000000000000000040000000000000000000000000000000000000000000000008000000000000000002000000000000000000000000200000000000000200000000000100000000040000001000200008000000000000200000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000f901f5f87a942ffa5ecdbe006d30397c7636d3e015eee251369ff842a0c965575a00553e094ca7c5d14f02e107c258dda06867cbf9e0e69f80e71bbcc1a000000000000000000000000000000000000000000000000000000000000003e8a000000000000000000000000000000000000000000000000000000000000003e8f9011c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000001b8a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000f858948cf6147918a5cbb672703f879f385036f8793a24e1a01449abf21e49fd025f33495e77f7b1461caefdd3d4bb646424a3f445c4576a5ba0000000000000000000000000440edffa1352b13227e8ee646f3ea37456dec701").to_vec(), - ]), - } - ) -} - -pub fn get_message_verification_header() -> CompactExecutionHeader { - CompactExecutionHeader { - parent_hash: hex!("04a7f6ab8282203562c62f38b0ab41d32aaebe2c7ea687702b463148a6429e04") - .into(), - block_number: 55, - state_root: hex!("894d968712976d613519f973a317cb0781c7b039c89f27ea2b7ca193f7befdb3").into(), - receipts_root: hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb") - .into(), - } + let inbound_fixture = snowbridge_pallet_ethereum_client_fixtures::make_inbound_fixture(); + (inbound_fixture.message.event_log, inbound_fixture.message.proof) } frame_support::construct_runtime!( @@ -130,20 +98,25 @@ parameter_types! { epoch: 0, } }; - pub const ExecutionHeadersPruneThreshold: u32 = 8192; } impl ethereum_beacon_client::Config for Test { type RuntimeEvent = RuntimeEvent; type ForkVersions = ChainForkVersions; - type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; type WeightInfo = (); } // Build genesis storage according to the mock runtime. 
 pub fn new_tester() -> sp_io::TestExternalities {
 	let t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
-	let mut ext = sp_io::TestExternalities::new(t);
-	let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000));
+	let ext = sp_io::TestExternalities::new(t);
 	ext
 }
+
+pub fn initialize_storage() -> DispatchResult {
+	let inbound_fixture = snowbridge_pallet_ethereum_client_fixtures::make_inbound_fixture();
+	EthereumBeaconClient::store_finalized_header(
+		inbound_fixture.finalized_header,
+		inbound_fixture.block_roots_root,
+	)
+}
diff --git a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs
index 50b6a25c3428..765958c12821 100644
--- a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs
+++ b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs
@@ -1,32 +1,26 @@
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: 2023 Snowfork
 use crate::{
-	functions::compute_period, pallet::ExecutionHeaders, sync_committee_sum, verify_merkle_branch,
-	BeaconHeader, CompactBeaconState, Error, ExecutionHeaderBuffer, FinalizedBeaconState,
-	LatestExecutionState, LatestFinalizedBlockRoot, NextSyncCommittee, SyncCommitteePrepared,
+	functions::compute_period, sync_committee_sum, verify_merkle_branch, BeaconHeader,
+	CompactBeaconState, Error, FinalizedBeaconState, LatestFinalizedBlockRoot, NextSyncCommittee,
+	SyncCommitteePrepared,
 };
 use crate::mock::{
-	get_message_verification_header, get_message_verification_payload,
-	load_checkpoint_update_fixture, load_execution_header_update_fixture,
+	get_message_verification_payload, load_checkpoint_update_fixture,
 	load_finalized_header_update_fixture, load_next_finalized_header_update_fixture,
 	load_next_sync_committee_update_fixture, load_sync_committee_update_fixture,
 };
 pub use crate::mock::*;

-use crate::config::{EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SLOTS_PER_EPOCH};
+use crate::config::{EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SLOTS_PER_EPOCH, SLOTS_PER_HISTORICAL_ROOT};
 use frame_support::{assert_err, assert_noop, assert_ok};
 use hex_literal::hex;
 use primitives::{
-	CompactExecutionHeader, ExecutionHeaderState, Fork, ForkVersions, NextSyncCommitteeUpdate,
-	VersionedExecutionPayloadHeader,
-};
-use rand::{thread_rng, Rng};
-use snowbridge_core::{
-	inbound::{VerificationError, Verifier},
-	RingBufferMap,
+	types::deneb, Fork, ForkVersions, NextSyncCommitteeUpdate, VersionedExecutionPayloadHeader,
 };
+use snowbridge_core::inbound::{VerificationError, Verifier};
 use sp_core::H256;
 use sp_runtime::DispatchError;

@@ -212,61 +206,6 @@ pub fn sync_committee_participation_is_supermajority_errors_when_not_supermajori
 	});
 }

-#[test]
-pub fn execution_header_pruning() {
-	new_tester().execute_with(|| {
-		let execution_header_prune_threshold = ExecutionHeadersPruneThreshold::get();
-		let to_be_deleted = execution_header_prune_threshold / 2;
-
-		let mut stored_hashes = vec![];
-
-		for i in 0..execution_header_prune_threshold {
-			let mut hash = H256::default();
-			thread_rng().try_fill(&mut hash.0[..]).unwrap();
-			EthereumBeaconClient::store_execution_header(
-				hash,
-				CompactExecutionHeader::default(),
-				i as u64,
-				hash,
-			);
-			stored_hashes.push(hash);
-		}
-
-		// We should have stored everything until now
-		assert_eq!({ ExecutionHeaders::<Test>::iter().count() }, stored_hashes.len());
-
-		// Let's push extra entries so that some of the previous entries are deleted.
- for i in 0..to_be_deleted { - let mut hash = H256::default(); - thread_rng().try_fill(&mut hash.0[..]).unwrap(); - EthereumBeaconClient::store_execution_header( - hash, - CompactExecutionHeader::default(), - (i + execution_header_prune_threshold) as u64, - hash, - ); - - stored_hashes.push(hash); - } - - // We should have only stored upto `execution_header_prune_threshold` - assert_eq!( - ExecutionHeaders::::iter().count() as u32, - execution_header_prune_threshold - ); - - // First `to_be_deleted` items must be deleted - for i in 0..to_be_deleted { - assert!(!ExecutionHeaders::::contains_key(stored_hashes[i as usize])); - } - - // Other entries should be part of data - for i in to_be_deleted..(to_be_deleted + execution_header_prune_threshold) { - assert!(ExecutionHeaders::::contains_key(stored_hashes[i as usize])); - } - }); -} - #[test] fn compute_fork_version() { let mock_fork_versions = ForkVersions { @@ -348,34 +287,6 @@ fn find_present_keys() { }); } -#[test] -fn cross_check_execution_state() { - new_tester().execute_with(|| { - let header_root: H256 = TEST_HASH.into(); - >::insert( - header_root, - CompactBeaconState { - // set slot to period 5 - slot: ((EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH) * 5) as u64, - block_roots_root: Default::default(), - }, - ); - LatestFinalizedBlockRoot::::set(header_root); - >::set(ExecutionHeaderState { - beacon_block_root: Default::default(), - // set slot to period 2 - beacon_slot: ((EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH) * 2) as u64, - block_hash: Default::default(), - block_number: 0, - }); - - assert_err!( - EthereumBeaconClient::cross_check_execution_state(), - Error::::ExecutionHeaderTooFarBehind - ); - }); -} - /* SYNC PROCESS TESTS */ #[test] @@ -608,40 +519,6 @@ fn submit_update_with_skipped_sync_committee_period() { }); } -#[test] -fn submit_update_execution_headers_too_far_behind() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let execution_header_update = Box::new(load_execution_header_update_fixture()); - let next_update = Box::new(load_next_sync_committee_update_fixture()); - - new_tester().execute_with(|| { - let far_ahead_finalized_header_slot = finalized_header_update.finalized_header.slot + - (EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH * 2) as u64; - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_ok!(EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - )); - - let header_root: H256 = TEST_HASH.into(); - >::insert( - header_root, - CompactBeaconState { - slot: far_ahead_finalized_header_slot, - block_roots_root: Default::default(), - }, - ); - LatestFinalizedBlockRoot::::set(header_root); - - assert_err!( - EthereumBeaconClient::submit(RuntimeOrigin::signed(1), next_update), - Error::::ExecutionHeaderTooFarBehind - ); - }); -} - #[test] fn submit_irrelevant_update() { let checkpoint = Box::new(load_checkpoint_update_fixture()); @@ -703,183 +580,57 @@ fn submit_update_with_invalid_sync_committee_update() { }); } +/// Check that a gap of more than 8192 slots between finalized headers is not allowed. 
#[test] -fn submit_execution_header_update() { +fn submit_finalized_header_update_with_too_large_gap() { let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let execution_header_update = Box::new(load_execution_header_update_fixture()); - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_ok!(EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update.clone() - )); - assert!(>::contains_key( - execution_header_update.execution_header.block_hash() - )); - }); -} - -#[test] -fn submit_execution_header_update_invalid_ancestry_proof() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let mut execution_header_update = Box::new(load_execution_header_update_fixture()); - if let Some(ref mut ancestry_proof) = execution_header_update.ancestry_proof { - ancestry_proof.header_branch[0] = TEST_HASH.into() - } - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::InvalidAncestryMerkleProof - ); - }); -} - -#[test] -fn submit_execution_header_update_invalid_execution_header_proof() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let mut execution_header_update = Box::new(load_execution_header_update_fixture()); - execution_header_update.execution_branch[0] = TEST_HASH.into(); - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::InvalidExecutionHeaderProof - ); - }); -} + let update = Box::new(load_sync_committee_update_fixture()); + let mut next_update = Box::new(load_next_sync_committee_update_fixture()); -#[test] -fn submit_execution_header_update_that_skips_block() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let execution_header_update = Box::new(load_execution_header_update_fixture()); - let mut skipped_block_execution_header_update = - Box::new(load_execution_header_update_fixture()); - let mut skipped_execution_header = - skipped_block_execution_header_update.execution_header.clone(); - - skipped_execution_header = match skipped_execution_header { - VersionedExecutionPayloadHeader::Capella(execution_payload_header) => { - let mut mut_execution_payload_header = execution_payload_header.clone(); - mut_execution_payload_header.block_number = execution_payload_header.block_number + 2; - VersionedExecutionPayloadHeader::Capella(mut_execution_payload_header) - }, - VersionedExecutionPayloadHeader::Deneb(execution_payload_header) => { - let mut mut_execution_payload_header = execution_payload_header.clone(); - mut_execution_payload_header.block_number = 
execution_payload_header.block_number + 2; - VersionedExecutionPayloadHeader::Deneb(mut_execution_payload_header) - }, - }; + // Adds 8193 slots, so that the next update is still in the next sync committee, but the + // gap between the finalized headers is more than 8192 slots. + let slot_with_large_gap = checkpoint.header.slot + SLOTS_PER_HISTORICAL_ROOT as u64 + 1; - skipped_block_execution_header_update.execution_header = skipped_execution_header; + next_update.finalized_header.slot = slot_with_large_gap; + // Adding some slots to the attested header and signature slot since they need to be ahead + // of the finalized header. + next_update.attested_header.slot = slot_with_large_gap + 33; + next_update.signature_slot = slot_with_large_gap + 43; new_tester().execute_with(|| { assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_ok!(EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update.clone() - )); - assert!(>::contains_key( - execution_header_update.execution_header.block_hash() - )); - assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - skipped_block_execution_header_update - ), - Error::::ExecutionHeaderSkippedBlock - ); - }); -} - -#[test] -fn submit_execution_header_update_that_is_also_finalized_header_which_is_not_stored() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let mut execution_header_update = Box::new(load_execution_header_update_fixture()); - execution_header_update.ancestry_proof = None; - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), update.clone())); + assert!(>::exists()); assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::ExpectedFinalizedHeaderNotStored + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), next_update.clone()), + Error::::InvalidFinalizedHeaderGap ); }); } +/// Check that a gap of 8192 slots between finalized headers is allowed. 
#[test] -fn submit_execution_header_update_that_is_also_finalized_header_which_is_stored_but_slots_dont_match( -) { +fn submit_finalized_header_update_with_gap_at_limit() { let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let mut execution_header_update = Box::new(load_execution_header_update_fixture()); - execution_header_update.ancestry_proof = None; - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - - let block_root: H256 = execution_header_update.header.hash_tree_root().unwrap(); - - >::insert( - block_root, - CompactBeaconState { - slot: execution_header_update.header.slot + 1, - block_roots_root: Default::default(), - }, - ); - LatestFinalizedBlockRoot::::set(block_root); - - assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::ExpectedFinalizedHeaderNotStored - ); - }); -} + let update = Box::new(load_sync_committee_update_fixture()); + let mut next_update = Box::new(load_next_sync_committee_update_fixture()); -#[test] -fn submit_execution_header_not_finalized() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let update = Box::new(load_execution_header_update_fixture()); + next_update.finalized_header.slot = checkpoint.header.slot + SLOTS_PER_HISTORICAL_ROOT as u64; + // Adding some slots to the attested header and signature slot since they need to be ahead + // of the finalized header. + next_update.attested_header.slot = + checkpoint.header.slot + SLOTS_PER_HISTORICAL_ROOT as u64 + 33; + next_update.signature_slot = checkpoint.header.slot + SLOTS_PER_HISTORICAL_ROOT as u64 + 43; new_tester().execute_with(|| { assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - - >::mutate(>::get(), |x| { - let prev = x.unwrap(); - *x = Some(CompactBeaconState { slot: update.header.slot - 1, ..prev }); - }); - + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), update.clone())); + assert!(>::exists()); assert_err!( - EthereumBeaconClient::submit_execution_header(RuntimeOrigin::signed(1), update), - Error::::HeaderNotFinalized + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), next_update.clone()), + // The test should pass the InvalidFinalizedHeaderGap check, and will fail at the + // next check, the merkle proof, because we changed the next_update slots. 
+ Error::::InvalidHeaderMerkleProof ); }); } @@ -888,37 +639,21 @@ fn submit_execution_header_not_finalized() { #[test] fn verify_message() { - let header = get_message_verification_header(); let (event_log, proof) = get_message_verification_payload(); - let block_hash = proof.block_hash; new_tester().execute_with(|| { - >::insert(block_hash, header); + assert_ok!(initialize_storage()); assert_ok!(EthereumBeaconClient::verify(&event_log, &proof)); }); } -#[test] -fn verify_message_missing_header() { - let (event_log, proof) = get_message_verification_payload(); - - new_tester().execute_with(|| { - assert_err!( - EthereumBeaconClient::verify(&event_log, &proof), - VerificationError::HeaderNotFound - ); - }); -} - #[test] fn verify_message_invalid_proof() { - let header = get_message_verification_header(); let (event_log, mut proof) = get_message_verification_payload(); - proof.data.1[0] = TEST_HASH.into(); - let block_hash = proof.block_hash; + proof.receipt_proof.1[0] = TEST_HASH.into(); new_tester().execute_with(|| { - >::insert(block_hash, header); + assert_ok!(initialize_storage()); assert_err!( EthereumBeaconClient::verify(&event_log, &proof), VerificationError::InvalidProof @@ -928,29 +663,28 @@ fn verify_message_invalid_proof() { #[test] fn verify_message_invalid_receipts_root() { - let mut header = get_message_verification_header(); - let (event_log, proof) = get_message_verification_payload(); - let block_hash = proof.block_hash; - header.receipts_root = TEST_HASH.into(); + let (event_log, mut proof) = get_message_verification_payload(); + let mut payload = deneb::ExecutionPayloadHeader::default(); + payload.receipts_root = TEST_HASH.into(); + proof.execution_proof.execution_header = VersionedExecutionPayloadHeader::Deneb(payload); new_tester().execute_with(|| { - >::insert(block_hash, header); + assert_ok!(initialize_storage()); assert_err!( EthereumBeaconClient::verify(&event_log, &proof), - VerificationError::InvalidProof + VerificationError::InvalidExecutionProof( + Error::::BlockBodyHashTreeRootFailed.into() + ) ); }); } #[test] fn verify_message_invalid_log() { - let header = get_message_verification_header(); let (mut event_log, proof) = get_message_verification_payload(); - let block_hash = proof.block_hash; event_log.topics = vec![H256::zero(); 10]; - new_tester().execute_with(|| { - >::insert(block_hash, header); + assert_ok!(initialize_storage()); assert_err!( EthereumBeaconClient::verify(&event_log, &proof), VerificationError::InvalidLog @@ -960,13 +694,11 @@ fn verify_message_invalid_log() { #[test] fn verify_message_receipt_does_not_contain_log() { - let header = get_message_verification_header(); let (mut event_log, proof) = get_message_verification_payload(); - let block_hash = proof.block_hash; event_log.data = hex!("f9013c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000002b8c000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000068000f000000000000000101d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec70100000101001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c0000e8890423c78a0000000000000000000000000000000000000000000000000000000000000000").to_vec(); new_tester().execute_with(|| { - >::insert(block_hash, header); + assert_ok!(initialize_storage()); 
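		// (Editorial aside, not part of the patch.) `initialize_storage()` is the
		// new mock helper added above; it boils down to:
		//
		//     let f = snowbridge_pallet_ethereum_client_fixtures::make_inbound_fixture();
		//     EthereumBeaconClient::store_finalized_header(f.finalized_header, f.block_roots_root)
		//
		// so these message-verification tests now seed a finalized beacon state that
		// the proof's `execution_proof` is checked against, instead of inserting a
		// `CompactExecutionHeader` keyed by block hash as before.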
assert_err!( EthereumBeaconClient::verify(&event_log, &proof), VerificationError::LogNotFound @@ -978,7 +710,6 @@ fn verify_message_receipt_does_not_contain_log() { fn set_operating_mode() { let checkpoint = Box::new(load_checkpoint_update_fixture()); let update = Box::new(load_finalized_header_update_fixture()); - let execution_header_update = Box::new(load_execution_header_update_fixture()); new_tester().execute_with(|| { assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); @@ -992,14 +723,6 @@ fn set_operating_mode() { EthereumBeaconClient::submit(RuntimeOrigin::signed(1), update), Error::::Halted ); - - assert_noop!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::Halted - ); }); } @@ -1015,3 +738,107 @@ fn set_operating_mode_root_only() { ); }); } + +#[test] +fn verify_execution_proof_invalid_ancestry_proof() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let finalized_header_update = Box::new(load_finalized_header_update_fixture()); + let mut execution_header_update = Box::new(load_execution_proof_fixture()); + if let Some(ref mut ancestry_proof) = execution_header_update.ancestry_proof { + ancestry_proof.header_branch[0] = TEST_HASH.into() + } + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); + assert_err!( + EthereumBeaconClient::verify_execution_proof(&execution_header_update), + Error::::InvalidAncestryMerkleProof + ); + }); +} + +#[test] +fn verify_execution_proof_invalid_execution_header_proof() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let finalized_header_update = Box::new(load_finalized_header_update_fixture()); + let mut execution_header_update = Box::new(load_execution_proof_fixture()); + execution_header_update.execution_branch[0] = TEST_HASH.into(); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); + assert_err!( + EthereumBeaconClient::verify_execution_proof(&execution_header_update), + Error::::InvalidExecutionHeaderProof + ); + }); +} + +#[test] +fn verify_execution_proof_that_is_also_finalized_header_which_is_not_stored() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let finalized_header_update = Box::new(load_finalized_header_update_fixture()); + let mut execution_header_update = Box::new(load_execution_proof_fixture()); + execution_header_update.ancestry_proof = None; + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); + assert_err!( + EthereumBeaconClient::verify_execution_proof(&execution_header_update), + Error::::ExpectedFinalizedHeaderNotStored + ); + }); +} + +#[test] +fn submit_execution_proof_that_is_also_finalized_header_which_is_stored_but_slots_dont_match() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let finalized_header_update = Box::new(load_finalized_header_update_fixture()); + let mut execution_header_update = Box::new(load_execution_proof_fixture()); + execution_header_update.ancestry_proof = None; + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + 
assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); + + let block_root: H256 = execution_header_update.header.hash_tree_root().unwrap(); + + >::insert( + block_root, + CompactBeaconState { + slot: execution_header_update.header.slot + 1, + block_roots_root: Default::default(), + }, + ); + LatestFinalizedBlockRoot::::set(block_root); + + assert_err!( + EthereumBeaconClient::verify_execution_proof(&execution_header_update), + Error::::ExpectedFinalizedHeaderNotStored + ); + }); +} + +#[test] +fn verify_execution_proof_not_finalized() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let finalized_header_update = Box::new(load_finalized_header_update_fixture()); + let update = Box::new(load_execution_proof_fixture()); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); + + >::mutate(>::get(), |x| { + let prev = x.unwrap(); + *x = Some(CompactBeaconState { slot: update.header.slot - 1, ..prev }); + }); + + assert_err!( + EthereumBeaconClient::verify_execution_proof(&update), + Error::::HeaderNotFinalized + ); + }); +} diff --git a/bridges/snowbridge/pallets/ethereum-client/src/types.rs b/bridges/snowbridge/pallets/ethereum-client/src/types.rs index 5dcefea9f80f..8808f989754b 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/types.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/types.rs @@ -15,17 +15,7 @@ pub type CheckpointUpdate = primitives::CheckpointUpdate; pub type Update = primitives::Update; pub type NextSyncCommitteeUpdate = primitives::NextSyncCommitteeUpdate; -pub use primitives::ExecutionHeaderUpdate; - -/// ExecutionHeader ring buffer implementation -pub type ExecutionHeaderBuffer = RingBufferMapImpl< - u32, - ::MaxExecutionHeadersToKeep, - crate::ExecutionHeaderIndex, - crate::ExecutionHeaderMapping, - crate::ExecutionHeaders, - OptionQuery, ->; +pub use primitives::{AncestryProof, ExecutionProof}; /// FinalizedState ring buffer implementation pub(crate) type FinalizedBeaconStateBuffer = RingBufferMapImpl< diff --git a/bridges/snowbridge/pallets/ethereum-client/src/weights.rs b/bridges/snowbridge/pallets/ethereum-client/src/weights.rs index 1a5893923942..5361fabf275b 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/weights.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/weights.rs @@ -36,7 +36,6 @@ pub trait WeightInfo { fn force_checkpoint() -> Weight; fn submit() -> Weight; fn submit_with_sync_committee() -> Weight; - fn submit_execution_header() -> Weight; } // For backwards compatibility and tests @@ -59,10 +58,4 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(6)) .saturating_add(RocksDbWeight::get().writes(1)) } - fn submit_execution_header() -> Weight { - Weight::from_parts(113_158_000_u64, 0) - .saturating_add(Weight::from_parts(0, 3537)) - .saturating_add(RocksDbWeight::get().reads(5)) - .saturating_add(RocksDbWeight::get().writes(4)) - } } diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-header-update.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-header-update.json deleted file mode 100755 index 319014249c12..000000000000 --- a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-header-update.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "header": { - "slot": 215, - "proposer_index": 2, - "parent_root": 
"0x97518f531a252bb6ca547b21aca9da767943ec99211d3b15c804e34c3a523f45", - "state_root": "0xb088b5a3a8c90d6dc919a695cd7bb0267c6f983ea2e675c559ceb8f46cb90b67", - "body_root": "0x0ba23c8224fdd01531d5ad51486353bd524a0b4c20bca704e26d3210616f829b" - }, - "ancestry_proof": { - "header_branch": [ - "0x97518f531a252bb6ca547b21aca9da767943ec99211d3b15c804e34c3a523f45", - "0x5ce0db996bd499c2b4f7a93263d5aafd052f420efb617cce6fdd54e25516aa45", - "0x84f0e373b66011ce774c7061440c0a50a51cce2b4b335395eee3e563d605597f", - "0x48f9ccc5f9594142c18c3b5c39a99f0549329c6ab3ba06c9a50030eadca87770", - "0xf89d6e311e05bc75a6f63ce118bccce254551f1a88d54c3b4f773f81f946bd99", - "0x2edd6d893c22636675147c07dfcdb541a146e87c3f15b51c388be4868246dc9b", - "0xd76b7de5f856e3208a91a42c9c398a7f4fab35e667bf916346050ae742514a2d", - "0x83a2e233e76385953ca41de4c3afe60471a61f0cc1b3846b4a0670e3e563b747", - "0xe783a5a109c2ad74e4eb53e8f6b11b31266a92a9e16c1fd5873109c5d41b282c", - "0xd4ea1ef3869ee6a0fd0b19d7d70027d144eecd4f1d32cbf47632a0a9069164b9", - "0xf8179564b58eb93a850d35e4156a04db651106442ad891c3e85155c1762792f1", - "0x4cbb1edb48cf1e32fb30db60aaaeaf6190ffe4d0c8dbc96cec307daecb78be12", - "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f" - ], - "finalized_block_root": "0x890a7f23b9ed2160901654be9efc575d6830ca860e2a97866ae3423fb7bd7231" - }, - "execution_header": { - "Deneb": { - "parent_hash": "0xd82ec63f5c5e6ba61d62f09c188f158e6449b94bdcc31941e68639eec3c4cf7a", - "fee_recipient": "0x0000000000000000000000000000000000000000", - "state_root": "0x8b65545fe5f3216b47b6339b9c91ca2b7f1032a970b04246d9e9fb4460ee34c3", - "receipts_root": "0x7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095", - "logs_bloom": "0x00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010", - "prev_randao": "0x6d9e2a012d82b1b6cb0a2c1c1ed24cc16dbb56e6e39ae545371e0666ab057862", - "block_number": 215, - "gas_limit": 64842908, - "gas_used": 119301, - "timestamp": 1705859527, - "extra_data": "0xd983010d0a846765746888676f312e32312e358664617277696e", - "base_fee_per_gas": 7, - "block_hash": "0x48498dbfbcfae53a7f4c289ee00747aceea925f6260c50ead5a33e1c55c40f98", - "transactions_root": "0x5ebc1347fe3df0611d4f66b19bd8e1c6f4eaed0371d850f14c83b1c77ea234e6", - "withdrawals_root": "0x792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535", - "blob_gas_used": 0, - "excess_blob_gas": 0 - } - }, - "execution_branch": [ - "0xf8c69d3830406d668619bcccc13c8dddde41e863326f7418b241d5924c4ad34a", - "0xb46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb", - "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", - "0xf4d6b5cf9c6e212615c3674fa625d04eb1114153fb221ef5ad02aa433fc67cfb" - ] -} \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-proof.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-proof.json new file mode 100755 index 000000000000..f55898087dfe --- /dev/null +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-proof.json @@ -0,0 +1,54 @@ +{ + "header": { + "slot": 393, 
+ "proposer_index": 4, + "parent_root": "0x6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef", + "state_root": "0xb62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434", + "body_root": "0x04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db" + }, + "ancestry_proof": { + "header_branch": [ + "0x6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef", + "0xfa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3", + "0xcadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d", + "0x33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c", + "0x2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf", + "0xe1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1", + "0xaa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97", + "0x160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f", + "0xf68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535", + "0x1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc", + "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", + "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", + "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f" + ], + "finalized_block_root": "0x751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46" + }, + "execution_header": { + "Deneb": { + "parent_hash": "0x8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2", + "fee_recipient": "0x0000000000000000000000000000000000000000", + "state_root": "0x96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b", + "receipts_root": "0xdccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284", + "logs_bloom": "0x00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010", + "prev_randao": "0x62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67", + "block_number": 393, + "gas_limit": 54492273, + "gas_used": 199644, + "timestamp": 1710552813, + "extra_data": "0xd983010d0b846765746888676f312e32312e368664617277696e", + "base_fee_per_gas": 7, + "block_hash": "0x6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131", + "transactions_root": "0x2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d", + "withdrawals_root": "0x792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535", + "blob_gas_used": 0, + "excess_blob_gas": 0 + } + }, + "execution_branch": [ + "0xa6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d", + "0xb46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb", + "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", + "0xd3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da" + ] +} \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/finalized-header-update.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/finalized-header-update.json index f9d5324d57b1..2dec5cc56fac 100755 --- a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/finalized-header-update.json +++ 
b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/finalized-header-update.json @@ -1,38 +1,40 @@ { "attested_header": { - "slot": 2566, - "proposer_index": 6, - "parent_root": "0x6eb9f13a2c496318ce1ab3087bbd872f5c9519a1a7ca8231a2453e3cb523af00", - "state_root": "0xc8cb12766113dff7e46d2917267bf33d0626d99dd47715fcdbc5c65fad3c04b4", - "body_root": "0xd8cfd0d7bc9bc3724417a1655bb0a67c0765ca36197320f4d834150b52ef1420" + "slot": 933, + "proposer_index": 1, + "parent_root": "0xf5fc63e2780ca302b97aea73fc95d74d702b5afe9a772c2b68f695026337b620", + "state_root": "0xd856d11636bc4d866e78be9e747b222b0977556a367ab42e4085277301438050", + "body_root": "0x5689091ab4eb76c2e876271add4924e1c66ce987c300c24aac2ad8c703e9a33f" }, "sync_aggregate": { "sync_committee_bits": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "sync_committee_signature": "0x9296f9a0387f2cac47008e22ad7c3cd3d49d35384c13e6aa1eacca7dca7c3d2ca81515e50eb3396b9550ed20ef7d8fa2049a186598feb2c00e93728045fcff917733d1827481b8fc95f3913e27fc70112c2490496eb57bb7181f02c3f9fd471f" + "sync_committee_signature": "0x93a3d482fe2a2f7fd2b634169752a8fddf1dc28b23a020b398be8526faf37a74ca0f6db1bed78a9c7256c09a6115235e108e0e8a7ce09287317b0856c4b77dfa5adba6cf4c3ebea5bfa4cd2fcde80fd0a532f2defe65d530201d5d2258796559" }, - "signature_slot": 2567, + "signature_slot": 934, "next_sync_committee_update": null, "finalized_header": { - "slot": 2496, - "proposer_index": 2, - "parent_root": "0xc99e49787106733eeebab4d93eb326e1f2214575c9d928f0c4ab0da0776f1622", - "state_root": "0xfbf8a08c86ef36bd173e37e733da4a78aa8e85fee99a990e858dd12a59087fde", - "body_root": "0xa2a8ad06901447b2807a9059580a4c40d8a941f325b1343c69f7c7c6c90e4ab0" + "slot": 864, + "proposer_index": 4, + "parent_root": "0x614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614", + "state_root": "0x5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a", + "body_root": "0x0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e" }, "finality_branch": [ - "0x4e00000000000000000000000000000000000000000000000000000000000000", + "0x1b00000000000000000000000000000000000000000000000000000000000000", "0x10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7", "0x98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d", - "0x958b8e43347f6df6fa5eb3d62d06a862381a6585aa40640dd1c0de11f1cf89c1", - "0xf107dce04faa86a28fc5d4a618be9cb8d4fc3c23d6c42c3624f3ff4bf6586a03", - "0xa501cdc02e86969ac3e4d0c5a36f4f049efaa1ab8cb6693f51d130eb52a80f30" + "0xf12d9aededc72724e417b518fe6f847684f26f81616243dedf8c551cc7d504f5", + "0x89a85d0907ab3fd6e00ae385f61d456c6191646404ae7b8d23d0e60440cf4d00", + "0x9fc943b6020eb61d780d78bcc6f6102a81d2c868d58f36e61c6e286a2dc4d8c2" ], - "block_roots_root": "0xd160b7687041891b73e54b06fc4e04f82d0fa8fdd76705895e216c6b24709dfe", + "block_roots_root": "0xb9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10", "block_roots_branch": [ - "0x105290e42d98ab6a0ada6e55453cede36c672abf645eeb986b88d7487616e135", - "0x9da41f274bcdf6122335443d9ce94d07163b48dba3e2f9499ff56f4e48b48b99", - "0xecea7e1d3152d8130e83afdfe34b4de4ba2b69a33c9471991096daf454de9cf5", - "0xb2bf1758e50b2bfff29169fbc70fdb884b2b05bb615dbc53567574da6f4f1ae2", - "0xcd87069daf70975779126d6af833b7d636c75ca4d5e750ebcad0e76408a5e5bf" - ] + "0x733422bd810895dab74cbbe07c69dd440cbb51f573181ad4dddac30fcdd0f41f", + "0x9b9eca73ab01d14549c325ba1b4610bb20bf1f8ec2dbd649f9d8cc7f3cea75fa", + 
"0xbcc666ad0ad9f9725cbd682bc95589d35b1b53b2a615f1e6e8dd5e086336becf", + "0x3069b547a08f703a1715016e926cbd64e71f93f64fb68d98d8c8f1ab745c46e5", + "0xc2de7e1097239404e17b263cfa0473533cc41e903cb03440d633bc5c27314cb4" + ], + "execution_header": null, + "execution_branch": null } \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/inbound-message.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/inbound-message.json index 5aa5a59f0237..6589dca5fb45 100644 --- a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/inbound-message.json +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/inbound-message.json @@ -1,31 +1,79 @@ { - "execution_header": { - "parent_hash": "0xd82ec63f5c5e6ba61d62f09c188f158e6449b94bdcc31941e68639eec3c4cf7a", - "state_root": "0x8b65545fe5f3216b47b6339b9c91ca2b7f1032a970b04246d9e9fb4460ee34c3", - "receipts_root": "0x7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095", - "block_number": 215 + "event_log": { + "address": "0xeda338e4dc46038493b885327842fd3e301cab39", + "topics": [ + "0x7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f", + "0xc173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539", + "0x5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000" }, - "message": { - "event_log": { - "address": "0xeda338e4dc46038493b885327842fd3e301cab39", - "topics": [ - "0x7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f", - "0xc173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539", - "0x5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0" + "proof": { + "block_hash": "0x6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131", + "tx_index": 0, + "receipt_proof": { + "keys": [ + "0xdccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284", + "0x4a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000" + "values": [ + "0xf851a09c01dd6d2d8de951c45af23d3ad00829ce021c04d6c8acbe1612d456ee320d4980808080808080a04a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f8080808080808080", + 
"0xf9028c30b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000" + ] }, - "Proof": { - "block_hash": "0x48498dbfbcfae53a7f4c289ee00747aceea925f6260c50ead5a33e1c55c40f98", - "tx_index": 0, - "data": { - "keys": [ - "0x7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095" + "execution_proof": { + "header": { + "slot": 393, + "proposer_index": 4, + "parent_root": "0x6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef", + "state_root": "0xb62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434", + "body_root": "0x04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db" + }, + "ancestry_proof": { + "header_branch": [ + "0x6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef", + "0xfa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3", + "0xcadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d", + "0x33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c", + "0x2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf", + "0xe1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1", + "0xaa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97", + "0x160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f", + "0xf68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535", + "0x1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc", + "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", + "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", + "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f" ], - "values": [ - 
"0xf9028e822080b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000" - ] - } + "finalized_block_root": "0x751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46" + }, + "execution_header": { + "Deneb": { + "parent_hash": "0x8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2", + "fee_recipient": "0x0000000000000000000000000000000000000000", + "state_root": "0x96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b", + "receipts_root": "0xdccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284", + "logs_bloom": "0x00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010", + "prev_randao": "0x62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67", + "block_number": 393, + "gas_limit": 54492273, + "gas_used": 199644, + "timestamp": 1710552813, + "extra_data": "0xd983010d0b846765746888676f312e32312e368664617277696e", + "base_fee_per_gas": 7, + "block_hash": "0x6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131", + "transactions_root": "0x2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d", + "withdrawals_root": "0x792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535", + "blob_gas_used": 0, + "excess_blob_gas": 0 + } + }, + "execution_branch": [ + "0xa6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d", + "0xb46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb", + "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", + "0xd3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da" + ] } } } \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json index 202790c1db5b..a62d646617e4 100755 --- 
a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json @@ -1,10 +1,10 @@ { "header": { - "slot": 2496, - "proposer_index": 2, - "parent_root": "0xc99e49787106733eeebab4d93eb326e1f2214575c9d928f0c4ab0da0776f1622", - "state_root": "0xfbf8a08c86ef36bd173e37e733da4a78aa8e85fee99a990e858dd12a59087fde", - "body_root": "0xa2a8ad06901447b2807a9059580a4c40d8a941f325b1343c69f7c7c6c90e4ab0" + "slot": 864, + "proposer_index": 4, + "parent_root": "0x614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614", + "state_root": "0x5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a", + "body_root": "0x0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e" }, "current_sync_committee": { "pubkeys": [ @@ -525,18 +525,18 @@ }, "current_sync_committee_branch": [ "0x3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59", - "0x93880225bf99a0c5ec22b266ff829837754e9c5edf37a68c05b8f803fd82fa45", - "0x4c60656ec9a95fcf11030ad309c716b5b15beb7f60a0bcfc7c9d4eff505472ff", - "0x22d1645fceb4bf9a695043dda19a53e784ec70df6a6b1bd66ea30eba1cca5f2f", - "0xa8fc6cad84ceefc633ec56c2d031d525e1cb4b51c70eb252919fce5bba9a1fde" + "0xa9e90f89e7f90fd5d79a6bbcaf40ba5cfc05ab1b561ac51c84867c32248d5b1e", + "0xbd1a76b03e02402bb24a627de1980a80ab17691980271f597b844b89b497ef75", + "0x07bbcd27c7cad089023db046eda17e8209842b7d97add8b873519e84fe6480e7", + "0x94c11eeee4cb6192bf40810f23486d8c75dfbc2b6f28d988d6f74435ede243b0" ], "validators_root": "0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69", - "block_roots_root": "0xd160b7687041891b73e54b06fc4e04f82d0fa8fdd76705895e216c6b24709dfe", + "block_roots_root": "0xb9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10", "block_roots_branch": [ - "0x105290e42d98ab6a0ada6e55453cede36c672abf645eeb986b88d7487616e135", - "0x9da41f274bcdf6122335443d9ce94d07163b48dba3e2f9499ff56f4e48b48b99", - "0xecea7e1d3152d8130e83afdfe34b4de4ba2b69a33c9471991096daf454de9cf5", - "0xb2bf1758e50b2bfff29169fbc70fdb884b2b05bb615dbc53567574da6f4f1ae2", - "0xcd87069daf70975779126d6af833b7d636c75ca4d5e750ebcad0e76408a5e5bf" + "0x733422bd810895dab74cbbe07c69dd440cbb51f573181ad4dddac30fcdd0f41f", + "0x9b9eca73ab01d14549c325ba1b4610bb20bf1f8ec2dbd649f9d8cc7f3cea75fa", + "0xbcc666ad0ad9f9725cbd682bc95589d35b1b53b2a615f1e6e8dd5e086336becf", + "0x3069b547a08f703a1715016e926cbd64e71f93f64fb68d98d8c8f1ab745c46e5", + "0xc2de7e1097239404e17b263cfa0473533cc41e903cb03440d633bc5c27314cb4" ] } \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update.json index 6bf20355c7a3..4d601d7d8f0b 100755 --- a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update.json +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update.json @@ -2,13 +2,13 @@ "attested_header": { "slot": 129, "proposer_index": 5, - "parent_root": "0xe32b6c18f029e755b0273dc1c4fa2bc4979794c8286ad40276c1b8a8e36049d8", - "state_root": "0x5ec9dacf25a5f09f20be0c59246b3d8dcfe64bd085b4bac5cec180690339801e", - "body_root": "0x4080cf2412d6ff77fc3164ad6155423a7112f207f173145ec16371a93f481f87" + "parent_root": "0xc2def03fe44a2802130ca1a6d8406e4ccf4f344fec7075d4d84431cd4a8b0904", + "state_root": "0xfa62cde6666add7353d7aedcb61ebe3c6c84b5361e34f814825b1250affb5be4", + "body_root": 
"0x0f9c69f243fe7b5fa5860396c66c720a9e8b1e526e7914188930497cc4a9134c" }, "sync_aggregate": { "sync_committee_bits": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "sync_committee_signature": "0xa761c3333fbb3d36bc8f65454f898da38001499dcd37494cf3d86940a995399ae649216ba4c985af154f83f72c8b1856079b7636a7a8d7d3f7602df2cbf699edb72b65253e82de4d9cc4db7377eafb22f799129f63f094a21c00675bdd5cc243" + "sync_committee_signature": "0x810cfde2afea3e276256c09bdf1cd321c33dcadeefddcfd24f488e6f756d917cfda90b5b437b3a4b4ef880985afa28a40cf565ec0a82877ddee36adc01d55d9d4a911ae3e22556e4c2636f1c707366fba019fb49450440fcd263d0b054b04bf0" }, "signature_slot": 130, "next_sync_committee_update": { @@ -531,33 +531,35 @@ }, "next_sync_committee_branch": [ "0x3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59", - "0xfd1e5ff5d4a15081efe3ff17857b1f95984c9a271b1c41c2f81f43e60c2cc541", - "0xe1c97f93bb7352d395d1ff8ee29881572cb7eb5d71634783701171dcd30cd93d", - "0x77fa2170ddbd89b15dae02f2e6cf9f76c8e00d1c4217320acffbe01576d0da61", - "0xe97288e0627219087a024078d69445f34f0583a6350a7c3c40c39fd1fa6f8d68" + "0x43276bee17fc9fba3f4866e902f0e5b5b308d79db91154bb8bf819973837a7d9", + "0x5572348e13ce59446ca0ea7cfeed07579da05f121920a76559e19bda94dd81cd", + "0x2d58adca9f3c742530de037f1933d6de1920ea4b68581613d4bc32b71547f221", + "0x7072b3c6577cd5a89b3234968f316f54630bb97eafbdb59e5b61637a9640255f" ] }, "finalized_header": { "slot": 64, "proposer_index": 4, - "parent_root": "0x0f7bc2353778c14c7f6dba0fc5fe6eec87228b0d3a5447b61dce67b4d9338de3", - "state_root": "0xfeb990de653ce494c0a263f820eaf05a9300dbdc30cb6065ede602827bfccde4", - "body_root": "0xf5235cd8c24f2695fc5b7989926305c10ad8cf5a87d62a739f675f5543df2ec1" + "parent_root": "0xa876486aaad7ddb897f369fd22d0a9903cd61d00c9e0dfe7998dd68d1008c678", + "state_root": "0x818e21c3388575f8ccc9ff17ec79d5a57915bcd31bccf47770f65a18e068416b", + "body_root": "0x1d1f73b864b3bb7e11ff91b56ca1381e0f9ca8122b2c542db88243604c763019" }, "finality_branch": [ "0x0200000000000000000000000000000000000000000000000000000000000000", "0x10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7", "0x98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d", - "0xe1c97f93bb7352d395d1ff8ee29881572cb7eb5d71634783701171dcd30cd93d", - "0x77fa2170ddbd89b15dae02f2e6cf9f76c8e00d1c4217320acffbe01576d0da61", - "0xe97288e0627219087a024078d69445f34f0583a6350a7c3c40c39fd1fa6f8d68" + "0x5572348e13ce59446ca0ea7cfeed07579da05f121920a76559e19bda94dd81cd", + "0x2d58adca9f3c742530de037f1933d6de1920ea4b68581613d4bc32b71547f221", + "0x7072b3c6577cd5a89b3234968f316f54630bb97eafbdb59e5b61637a9640255f" ], - "block_roots_root": "0x6fcdfd1c3fb1bdd421fe59dddfff3855b5ed5e30373887991a0059d019ad12bc", + "block_roots_root": "0x715b08694bef183a6d94b3113d16a7129f89fc3edec85a7e0eaf6ef9153552ef", "block_roots_branch": [ - "0x94b59531f172bc24f914bc0c10104ccb158676850f8cc3b47b6ddb7f096ebdd7", - "0x22470ed9155a938587d44d5fa19217c0f939d8862e504e67cd8cb4d1b960795e", - "0xfeec3ef1a68f93849e71e84f90b99602cccc31868137b6887ca8244a4b979e8e", + "0x4028c72c71b6ce80ea7d18b2c9471f4e4fa39746261a9921e832a4a2f9bdf7bb", + "0x75f98062661785d3290b7bd998b64446582baa49210733fd4603e1a97cd45a44", + "0x6fb757f44052f30c464810f01b0132adfa1a5446d8715b41e9af88eee1ee3e65", "0x5340ad5877c72dca689ca04bc8fedb78d67a4801d99887937edd8ccd29f87e82", - "0xf5ff4b0c6190005015889879568f5f0d9c40134c7ec4ffdda47950dcd92395ad" - ] + 
"0xf2b3cb56753939a728ccad399a434ca490f018f2f331529ec0d8b2d59c509271" + ], + "execution_header": null, + "execution_branch": null } \ No newline at end of file diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml index 2e81a3d9ee4f..d6a640d454ec 100644 --- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml @@ -41,7 +41,7 @@ snowbridge-core = { path = "../../primitives/core", default-features = false } snowbridge-ethereum = { path = "../../primitives/ethereum", default-features = false } snowbridge-router-primitives = { path = "../../primitives/router", default-features = false } snowbridge-beacon-primitives = { path = "../../primitives/beacon", default-features = false } -snowbridge-pallet-inbound-queue-fixtures = { path = "./fixtures", default-features = false, optional = true } +snowbridge-pallet-inbound-queue-fixtures = { path = "fixtures", default-features = false, optional = true } [dev-dependencies] frame-benchmarking = { path = "../../../../substrate/frame/benchmarking" } diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs index 39588d1a478e..9ff9a0cedde4 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs @@ -4,17 +4,6 @@ extern crate alloc; -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::Message; -use sp_core::RuntimeDebug; - pub mod register_token; -pub mod register_token_with_insufficient_fee; pub mod send_token; pub mod send_token_to_penpal; - -#[derive(Clone, RuntimeDebug)] -pub struct InboundQueueFixture { - pub execution_header: CompactExecutionHeader, - pub message: Message, -} diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token.rs index 59cf43df46bf..7854b0a8e71a 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token.rs +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token.rs @@ -3,20 +3,16 @@ // Generated, do not edit! 
// See ethereum client README.md for instructions to generate -use crate::InboundQueueFixture; use alloc::vec; use hex_literal::hex; -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::{Log, Message, Proof}; +use snowbridge_beacon_primitives::{ + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, +}; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; +use sp_core::U256; pub fn make_register_token_message() -> InboundQueueFixture { InboundQueueFixture { - execution_header: CompactExecutionHeader{ - parent_hash: hex!("d5de3dd02c96dbdc8aaa4db70a1e9fdab5ded5f4d52f18798acd56a3d37d1ad6").into(), - block_number: 772, - state_root: hex!("49cba2a79b23ad74cefe80c3a96699825d1cda0f75bfceb587c5549211c86245").into(), - receipts_root: hex!("7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095").into(), - }, message: Message { event_log: Log { address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), @@ -28,14 +24,74 @@ pub fn make_register_token_message() -> InboundQueueFixture { data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").into(), }, proof: Proof { - block_hash: hex!("392182a385b3a417e8ddea8b252953ee81e6ec0fb09d9056c96c89fbeb703a3f").into(), - tx_index: 0, - data: (vec![ - hex!("7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095").to_vec(), + receipt_proof: (vec![ + hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").to_vec(), + hex!("4a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f").to_vec(), ], vec![ - hex!("f9028e822080b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").to_vec(), + hex!("f851a09c01dd6d2d8de951c45af23d3ad00829ce021c04d6c8acbe1612d456ee320d4980808080808080a04a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f8080808080808080").to_vec(), + 
hex!("f9028c30b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").to_vec(), ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 393, + proposer_index: 4, + parent_root: hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + state_root: hex!("b62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434").into(), + body_root: hex!("04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + hex!("fa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3").into(), + hex!("cadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d").into(), + hex!("33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c").into(), + hex!("2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf").into(), + hex!("e1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1").into(), + hex!("aa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97").into(), + hex!("160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f").into(), + hex!("f68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535").into(), + hex!("1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), + hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), + ], + finalized_block_root: hex!("751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b").into(), + receipts_root: hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").into(), + logs_bloom: 
hex!("00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010").into(), + prev_randao: hex!("62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67").into(), + block_number: 393, + gas_limit: 54492273, + gas_used: 199644, + timestamp: 1710552813, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131").into(), + transactions_root: hex!("2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("a6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("d3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da").into(), + ], + } }, }, + finalized_header: BeaconHeader { + slot: 864, + proposer_index: 4, + parent_root: hex!("614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614").into(), + state_root: hex!("5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a").into(), + body_root: hex!("0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e").into(), + }, + block_roots_root: hex!("b9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10").into(), } } diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token_with_insufficient_fee.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token_with_insufficient_fee.rs deleted file mode 100644 index 0fa446b36224..000000000000 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token_with_insufficient_fee.rs +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -// Generated, do not edit! 
-// See ethereum client README.md for instructions to generate - -use crate::InboundQueueFixture; -use alloc::vec; -use hex_literal::hex; -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::{Log, Message, Proof}; - -pub fn make_register_token_with_infufficient_fee_message() -> InboundQueueFixture { - InboundQueueFixture { - execution_header: CompactExecutionHeader{ - parent_hash: hex!("998e81dc6df788a920b67e058fbde0dc3f4ec6f11f3f7cd8c3148e6d99584885").into(), - block_number: 338, - state_root: hex!("30ef9c9db2609de19bbc6c3cbeddac889e82bbcb2db20304b3abdfbdc7134cbf").into(), - receipts_root: hex!("969335c3132a007cb8b5886a3c23dd8da63cba04aeda29857a86ee1c13dae782").into(), - }, - message: Message { - event_log: Log { - address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), - topics: vec![ - hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), - hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), - hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), - ], - // insufficient xcm fee as only 1000(hex:e803) - data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7de8030000000000000000000000000000000000000000000000000000000000000000").into(), - }, - proof: Proof { - block_hash: hex!("5976f37f0e331d194eb331df74355ef47565c3a1bd11c95a45b681f6917085c1").into(), - tx_index: 0, - data: (vec![ - hex!("969335c3132a007cb8b5886a3c23dd8da63cba04aeda29857a86ee1c13dae782").to_vec(), - ], vec![ - hex!("f9028e822080b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7de8030000000000000000000000000000000000000000000000000000000000000000").to_vec(), - ]), - }, - }, - } -} diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token.rs index be194ce2dfe3..6f84445ecabb 100755 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token.rs +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token.rs @@ -3,20 +3,16 @@ // Generated, do not edit! 
// See ethereum client README.md for instructions to generate -use crate::InboundQueueFixture; use alloc::vec; use hex_literal::hex; -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::{Log, Message, Proof}; +use snowbridge_beacon_primitives::{ + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, +}; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; +use sp_core::U256; pub fn make_send_token_message() -> InboundQueueFixture { InboundQueueFixture { - execution_header: CompactExecutionHeader{ - parent_hash: hex!("920cecde45d428e3a77590b70f8533cf4c2c36917b8a7b74c915e7fa3dae7075").into(), - block_number: 1148, - state_root: hex!("bbc6ba0e9940d641afecbbaf3f97abd2b9ffaf2f6bd4879c4a71e659eca89978").into(), - receipts_root: hex!("9f3340b57eddc1f86de30776db57faeca80269a3dd459031741988dec240ce34").into(), - }, message: Message { event_log: Log { address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), @@ -28,14 +24,72 @@ pub fn make_send_token_message() -> InboundQueueFixture { data: hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000").into(), }, proof: Proof { - block_hash: hex!("d3c155f123c3cbff22f3d7869283e02179edea9ffa7a5e9a4d8414c2a6b8991f").into(), - tx_index: 0, - data: (vec![ - hex!("9f3340b57eddc1f86de30776db57faeca80269a3dd459031741988dec240ce34").to_vec(), + receipt_proof: (vec![ + hex!("f9d844c5b79638609ba385b910fec3b5d891c9d7b189f135f0432f33473de915").to_vec(), ], vec![ - 
hex!("f90451822080b9044b02f90447018301bcb9b9010000800000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020200000000000000000000000000003000000040080008000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200800000000000f9033cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000000000000000000000000000000000000000003e8b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000208eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48f9013c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0c8eaf22f2cb07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26fb8c000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000").to_vec(), + 
hex!("f90451822080b9044b02f90447018301bcb6b9010000800000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020200000000000000000000000000003000000040080008000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200800000000000f9033cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000000000000000000000000000000000000000003e8b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000208eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48f9013c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0c8eaf22f2cb07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26fb8c000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000").to_vec(), ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 2321, + proposer_index: 5, + parent_root: hex!("2add14727840d3a5ea061e14baa47030bb81380a65999200d119e73b86411d20").into(), + state_root: hex!("d962981467920bb2b7efa4a7a1baf64745582c3250857f49a957c5dae9a0da39").into(), + body_root: hex!("18e3f7f51a350f371ad35d166f2683b42af51d1836b295e4093be08acb0dcb7a").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("2add14727840d3a5ea061e14baa47030bb81380a65999200d119e73b86411d20").into(), + hex!("48b2e2f5256906a564e5058698f70e3406765fefd6a2edc064bb5fb88aa2ed0a").into(), + hex!("e5ed7c704e845418219b2fda42cd2f3438ffbe4c4b320935ae49439c6189f7a7").into(), + hex!("4a7ce24526b3f571548ad69679e4e260653a1b3b911a344e7f988f25a5c917a7").into(), + hex!("46fc859727ab0d0e8c344011f7d7a4426ccb537bb51363397e56cc7153f56391").into(), + hex!("f496b6f85a7c6c28a9048f2153550a7c5bcb4b23844ed3b87f6baa646124d8a3").into(), + hex!("7318644e474beb46e595a1875acc7444b937f5208065241911d2a71ac50c2de3").into(), + hex!("5cf48519e518ac64286aef5391319782dd38831d5dcc960578a6b9746d5f8cee").into(), + hex!("efb3e50fa39ca9fe7f76adbfa36fa8451ec2fd5d07b22aaf822137c04cf95a76").into(), + hex!("2206cd50750355ffaef4a67634c21168f2b564c58ffd04f33b0dc7af7dab3291").into(), + 
hex!("1a4014f6c4fcce9949fba74cb0f9e88df086706f9e05560cc9f0926f8c90e373").into(), + hex!("2df7cc0bcf3060be4132c63da7599c2600d9bbadf37ab001f15629bc2255698e").into(), + hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), + ], + finalized_block_root: hex!("f869dd1c9598043008a3ac2a5d91b3d6c7b0bb3295b3843bc84c083d70b0e604").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("5d7859883dde1eba6c98b20eac18426134b25da2a89e5e360f3343b15e0e0a31").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("f8fbebed4c84d46231bd293bb9fbc9340d5c28c284d99fdaddb77238b8960ae2").into(), + receipts_root: hex!("f9d844c5b79638609ba385b910fec3b5d891c9d7b189f135f0432f33473de915").into(), + logs_bloom: hex!("00800000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020200000000000000000000000000003000000040080008000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200800000000000").into(), + prev_randao: hex!("15533eeb366c6386bea5aeb8f425871928348c092209e4377f2418a6dedd7fd0").into(), + block_number: 2321, + gas_limit: 30000000, + gas_used: 113846, + timestamp: 1710554741, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("585a07122a30339b03b6481eae67c2d3de2b6b64f9f426230986519bf0f1bdfe").into(), + transactions_root: hex!("09cd60ee2207d804397c81f7b7e1e5d3307712b136e5376623a80317a4bdcd7a").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("9d419471a9a4719b40e7607781fbe32d9a7766b79805505c78c0c58133496ba2").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("bee375b8f1bbe4cd0e783c78026c1829ae72741c2dead5cab05d6834c5e5df65").into(), + ], + } }, }, + finalized_header: BeaconHeader { + slot: 4032, + proposer_index: 5, + parent_root: hex!("180aaaec59d38c3860e8af203f01f41c9bc41665f4d17916567c80f6cd23e8a2").into(), + state_root: hex!("3341790429ed3bf894cafa3004351d0b99e08baf6c38eb2a54d58e69fd2d19c6").into(), + body_root: hex!("a221e0c695ac7b7d04ce39b28b954d8a682ecd57961d81b44783527c6295f455").into(), + }, + block_roots_root: hex!("5744385ef06f82e67606f49aa29cd162f2e837a68fb7bd82f1fc6155d9f8640f").into(), } } diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token_to_penpal.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token_to_penpal.rs index ad20e8293c26..6172028b787d 100755 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token_to_penpal.rs +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token_to_penpal.rs @@ -3,39 +3,93 @@ // Generated, do not edit! 
// See ethereum client README.md for instructions to generate -use crate::InboundQueueFixture; use alloc::vec; use hex_literal::hex; -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::{Log, Message, Proof}; +use snowbridge_beacon_primitives::{ + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, +}; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; +use sp_core::U256; pub fn make_send_token_to_penpal_message() -> InboundQueueFixture { InboundQueueFixture { - execution_header: CompactExecutionHeader{ - parent_hash: hex!("434148c290f27ee4be34fa344cd7608bf942a4541b27c9d868439631b3f37a8d").into(), - block_number: 816, - state_root: hex!("595e643f9095870e30e85e2bbef7d9e3a39df5aae839d26cf455d3dbf3e5a539").into(), - receipts_root: hex!("c40ab2c4abcfdea4f42195e0ad822806e5423108021c3b542646c7193319a6c1").into(), - }, message: Message { event_log: Log { address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), topics: vec![ hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), - hex!("c8eaf22f2cb07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26f").into(), + hex!("be323bced46a1a49c8da2ab62ad5e974fd50f1dabaeed70b23ca5bcf14bfe4aa").into(), ], - data: hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").into(), + data: hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").into(), }, proof: Proof { - block_hash: hex!("6c49a7f8fb2014a23e58a949c95a6743174589a7ce83434b073dc05dec402f3d").into(), - tx_index: 0, - data: (vec![ - hex!("c40ab2c4abcfdea4f42195e0ad822806e5423108021c3b542646c7193319a6c1").to_vec(), + receipt_proof: (vec![ + hex!("106f1eaeac04e469da0020ad5c8a72af66323638bd3f561a3c8236063202c120").to_vec(), ], vec![ - 
hex!("f90471822080b9046b02f90467018301d30fb9010000800000000000000000000000000000000000000000004000000000000000000400000000004000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020000000000000000000000000000003000000000080018000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200820000000000f9035cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000000000000000000000000000000000000000007d0b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000201cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07cf9015c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0c8eaf22f2cb07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26fb8e000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").to_vec(), + 
hex!("f90471822080b9046b02f904670183017d9cb9010000800000000000008000000000000000000000000000004000000000000000000400000000004000000000001000000010000000000000000000001008000000000000000000000001000008000040000000000000000000000000008000080000000000200000000000000000000000000100000000000000000010000000000000020000000000000000000000000000003000000000080018000000000000000000040004000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200820000000000f9035cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000000000000000000000000000000000000000007d0b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000201cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07cf9015c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0be323bced46a1a49c8da2ab62ad5e974fd50f1dabaeed70b23ca5bcf14bfe4aab8e000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").to_vec(), ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 4235, + proposer_index: 4, + parent_root: hex!("1b31e6264c19bcad120e434e0aede892e7d7c8ed80ab505cb593d9a4a16bc566").into(), + state_root: hex!("725f51771a0ecf72c647a283ab814ca088f998eb8c203181496b0b8e01f624fa").into(), + body_root: hex!("6f1c326d192e7e97e21e27b16fd7f000b8fa09b435ff028849927e382302b0ce").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("1b31e6264c19bcad120e434e0aede892e7d7c8ed80ab505cb593d9a4a16bc566").into(), + hex!("335eb186c077fa7053ec96dcc5d34502c997713d2d5bc4eb74842118d8cd5a64").into(), + hex!("326607faf2a7dfc9cfc4b6895f8f3d92a659552deb2c8fd1e892ec00c86c734c").into(), + hex!("4e20002125d7b6504df7c774f3f48e018e1e6762d03489149670a8335bba1425").into(), + hex!("e76af5cd61aade5aec8282b6f1df9046efa756b0466bba5e49032410f7739a1b").into(), + hex!("ee4dcd9527712116380cddafd120484a3bedf867225bbb86850b84decf6da730").into(), + hex!("e4687a07421d3150439a2cd2f09f3b468145d75b359a2e5fa88dfbec51725b15").into(), + hex!("38eaa78978e95759aa9b6f8504a8dbe36151f20ae41907e6a1ea165700ceefcd").into(), + hex!("1c1b071ec6f13e15c47d07d1bfbcc9135d6a6c819e68e7e6078a2007418c1a23").into(), + 
hex!("0b3ad7ad193c691c8c4ba1606ad2a90482cd1d033c7db58cfe739d0e20431e9e").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), + hex!("b2ffec5f2c14640305dd941330f09216c53b99d198e93735a400a6d3a4de191f").into(), + ], + finalized_block_root: hex!("08be7a59e947f08cd95c4ef470758730bf9e3b0db0824cb663ea541c39b0e65c").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("5d1186ae041f58785edb2f01248e95832f2e5e5d6c4eb8f7ff2f58980bfc2de9").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("2a66114d20e93082c8e9b47c8d401a937013487d757c9c2f3123cf43dc1f656d").into(), + receipts_root: hex!("106f1eaeac04e469da0020ad5c8a72af66323638bd3f561a3c8236063202c120").into(), + logs_bloom: hex!("00800000000000008000000000000000000000000000004000000000000000000400000000004000000000001000000010000000000000000000001008000000000000000000000001000008000040000000000000000000000000008000080000000000200000000000000000000000000100000000000000000010000000000000020000000000000000000000000000003000000000080018000000000000000000040004000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200820000000000").into(), + prev_randao: hex!("92e063c7e369b74149fdd1d7132ed2f635a19b9d8bff57637b8ee4736576426e").into(), + block_number: 4235, + gas_limit: 30000000, + gas_used: 97692, + timestamp: 1710556655, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("ce24fe3047aa20a8f222cd1d04567c12b39455400d681141962c2130e690953f").into(), + transactions_root: hex!("0c8388731de94771777c60d452077065354d90d6e5088db61fc6a134684195cc").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("99d397fa180078e66cd3a3b77bcb07553052f4e21d447167f3a406f663b14e6a").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("53ddf17147819c1abb918178b0230d965d1bc2c0d389f45e91e54cb1d2d468aa").into(), + ], + } }, }, + finalized_header: BeaconHeader { + slot: 4672, + proposer_index: 4, + parent_root: hex!("951233bf9f4bddfb2fa8f54e3bd0c7883779ef850e13e076baae3130dd7732db").into(), + state_root: hex!("4d303003b8cb097cbcc14b0f551ee70dac42de2c1cc2f4acfca7058ca9713291").into(), + body_root: hex!("664d13952b6f369bf4cf3af74d067ec33616eb57ed3a8a403fd5bae4fbf737dd").into(), + }, + block_roots_root: hex!("af71048297c070e6539cf3b9b90ae07d86d363454606bc239734629e6b49b983").into(), } } diff --git a/bridges/snowbridge/pallets/inbound-queue/src/benchmarking/mod.rs b/bridges/snowbridge/pallets/inbound-queue/src/benchmarking/mod.rs index 931befa2ac67..d59d92757721 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/benchmarking/mod.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/benchmarking/mod.rs @@ -19,8 +19,8 @@ mod benchmarks { let create_message = make_register_token_message(); T::Helper::initialize_storage( - create_message.message.proof.block_hash, - create_message.execution_header, + create_message.finalized_header, + create_message.block_roots_root, ); let sovereign_account = 
sibling_sovereign_account::<T>(1000u32.into()); diff --git a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs index 9b8513772b25..a32006975613 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs @@ -30,9 +30,6 @@ mod envelope; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; -#[cfg(feature = "runtime-benchmarks")] -use snowbridge_beacon_primitives::CompactExecutionHeader; - pub mod weights; #[cfg(test)] @@ -48,7 +45,7 @@ use envelope::Envelope; use frame_support::{ traits::{ fungible::{Inspect, Mutate}, - tokens::Preservation, + tokens::{Fortitude, Preservation}, }, weights::WeightToFee, PalletError, @@ -56,6 +53,7 @@ use frame_system::ensure_signed; use scale_info::TypeInfo; use sp_core::{H160, H256}; +use sp_runtime::traits::Zero; use xcm::prelude::{ send_xcm, Instruction::SetTopic, Junction::*, Location, SendError as XcmpSendError, SendXcm, Xcm, XcmContext, XcmHash, @@ -75,6 +73,9 @@ use sp_runtime::{traits::Saturating, SaturatedConversion, TokenError}; pub use weights::WeightInfo; +#[cfg(feature = "runtime-benchmarks")] +use snowbridge_beacon_primitives::BeaconHeader; + type BalanceOf<T> = <<T as Config>::Token as Inspect<<T as frame_system::Config>::AccountId>>::Balance; @@ -94,7 +95,7 @@ pub mod pallet { #[cfg(feature = "runtime-benchmarks")] pub trait BenchmarkHelper<T> { - fn initialize_storage(block_hash: H256, header: CompactExecutionHeader); + fn initialize_storage(beacon_header: BeaconHeader, block_roots_root: H256); } #[pallet::config] @@ -264,11 +265,19 @@ pub mod pallet { } })?; - // Reward relayer from the sovereign account of the destination parachain - // Expected to fail if sovereign account has no funds + // Reward relayer from the sovereign account of the destination parachain, only if funds + // are available let sovereign_account = sibling_sovereign_account::<T>(channel.para_id); let delivery_cost = Self::calculate_delivery_cost(message.encode().len() as u32); - T::Token::transfer(&sovereign_account, &who, delivery_cost, Preservation::Preserve)?; + let amount = T::Token::reducible_balance( + &sovereign_account, + Preservation::Preserve, + Fortitude::Polite, + ) + .min(delivery_cost); + if !amount.is_zero() { + T::Token::transfer(&sovereign_account, &who, amount, Preservation::Preserve)?; + } // Decode message into XCM let (xcm, fee) =
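The hunk above replaces a hard `transfer` of the relayer reward, which failed with `FundsUnavailable` on an underfunded sovereign account, with a best-effort payout capped by what the account can spare. As a rough standalone sketch of that pattern (the `Account` type, `reducible_balance`, and `reward_relayer` here are simplified stand-ins for illustration, not the `frame_support` fungible traits used in the hunk):

```rust
// Sketch of the "pay what the account can spare" pattern used above.
// All names and types are hypothetical stand-ins.

struct Account {
    free: u128,
}

const EXISTENTIAL_DEPOSIT: u128 = 1;

/// What the account can spend while staying alive, mirroring the intent of
/// `reducible_balance(Preservation::Preserve, Fortitude::Polite)`.
fn reducible_balance(account: &Account) -> u128 {
    account.free.saturating_sub(EXISTENTIAL_DEPOSIT)
}

/// Pay out at most `delivery_cost`, never failing and never killing the payer.
fn reward_relayer(payer: &mut Account, relayer: &mut Account, delivery_cost: u128) {
    let amount = reducible_balance(payer).min(delivery_cost);
    if amount != 0 {
        payer.free -= amount;
        relayer.free += amount;
    }
}

fn main() {
    let mut sovereign = Account { free: 5 };
    let mut relayer = Account { free: 0 };
    // The delivery cost exceeds what the sovereign account can spare:
    reward_relayer(&mut sovereign, &mut relayer, 100);
    assert_eq!(sovereign.free, EXISTENTIAL_DEPOSIT); // payer stays alive
    assert_eq!(relayer.free, 4); // relayer receives a partial reward
    println!("sovereign: {}, relayer: {}", sovereign.free, relayer.free);
}
```

The new tests in `test.rs` further down exercise both branches: a zero-balance payer skips the transfer entirely, and an `ED+1` payer pays out a partial reward while keeping the existential deposit.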
diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index b5ef49ae98e6..df009b01421c 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -5,11 +5,13 @@ use super::*; use core::convert::From; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU128, ConstU32, Everything}, + traits::{ConstU32, Everything}, weights::IdentityFee, }; use hex_literal::hex; -use snowbridge_beacon_primitives::{Fork, ForkVersions}; +use snowbridge_beacon_primitives::{ + types::deneb, BeaconHeader, ExecutionProof, Fork, ForkVersions, VersionedExecutionPayloadHeader, }; use snowbridge_core::{ gwei, inbound::{Log, Proof, VerificationError}, @@ -65,6 +67,10 @@ impl frame_system::Config for Test { type Block = Block; } +parameter_types! { + pub const ExistentialDeposit: u128 = 1; +} + impl pallet_balances::Config for Test { type MaxLocks = (); type MaxReserves = (); @@ -72,7 +78,7 @@ impl pallet_balances::Config for Test { type Balance = Balance; type RuntimeEvent = RuntimeEvent; type DustRemoval = (); - type ExistentialDeposit = ConstU128<1>; + type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); type FreezeIdentifier = (); @@ -82,7 +88,6 @@ impl pallet_balances::Config for Test { } parameter_types! { - pub const ExecutionHeadersPruneThreshold: u32 = 10; pub const ChainForkVersions: ForkVersions = ForkVersions{ genesis: Fork { version: [0, 0, 0, 1], // 0x00000001 @@ -110,7 +115,6 @@ parameter_types! { impl snowbridge_pallet_ethereum_client::Config for Test { type RuntimeEvent = RuntimeEvent; type ForkVersions = ChainForkVersions; - type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; type WeightInfo = (); } @@ -139,7 +143,7 @@ parameter_types! { #[cfg(feature = "runtime-benchmarks")] impl BenchmarkHelper<Test> for Test { // not implemented since the MockVerifier is used for tests - fn initialize_storage(_: H256, _: CompactExecutionHeader) {} + fn initialize_storage(_: BeaconHeader, _: H256) {} } // Mock XCM sender that always succeeds @@ -173,7 +177,8 @@ parameter_types! { pub Parameters: PricingParameters<u128> = PricingParameters { exchange_rate: FixedU128::from_rational(1, 400), fee_per_gas: gwei(20), - rewards: Rewards { local: DOT, remote: meth(1) } + rewards: Rewards { local: DOT, remote: meth(1) }, + multiplier: FixedU128::from_rational(1, 1), }; } @@ -334,5 +339,32 @@ } } +pub fn mock_execution_proof() -> ExecutionProof { + ExecutionProof { + header: BeaconHeader::default(), + ancestry_proof: None, + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: Default::default(), + fee_recipient: Default::default(), + state_root: Default::default(), + receipts_root: Default::default(), + logs_bloom: vec![], + prev_randao: Default::default(), + block_number: 0, + gas_limit: 0, + gas_used: 0, + timestamp: 0, + extra_data: vec![], + base_fee_per_gas: Default::default(), + block_hash: Default::default(), + transactions_root: Default::default(), + withdrawals_root: Default::default(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![], + } +} + pub const ASSET_HUB_PARAID: u32 = 1000u32; pub const TEMPLATE_PARAID: u32 = 1001u32;
diff --git a/bridges/snowbridge/pallets/inbound-queue/src/test.rs b/bridges/snowbridge/pallets/inbound-queue/src/test.rs index 131f44c95621..8435ae6fe914 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/test.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/test.rs @@ -7,7 +7,7 @@ use frame_support::{assert_noop, assert_ok}; use hex_literal::hex; use snowbridge_core::{inbound::Proof, ChannelId}; use sp_keyring::AccountKeyring as Keyring; -use sp_runtime::{DispatchError, TokenError}; +use sp_runtime::DispatchError; use crate::{Error, Event as InboundQueueEvent}; @@ -25,9 +25,8 @@ fn test_submit_happy_path() { let message = Message { event_log: mock_event_log(), proof: Proof { - block_hash: Default::default(), - tx_index: Default::default(), - data: Default::default(), + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), }, }; @@ -77,9 +76,8 @@ fn test_submit_xcm_invalid_channel() { let message = Message { event_log: mock_event_log_invalid_channel(), proof: Proof { - block_hash: Default::default(), - tx_index: Default::default(), - data: Default::default(), + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), }, }; assert_noop!( @@ -103,9 +101,8 @@ fn test_submit_with_invalid_gateway() { let message = Message { event_log: mock_event_log_invalid_gateway(), proof: Proof { - block_hash: Default::default(), - tx_index: Default::default(), - data: Default::default(), + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), }, }; assert_noop!( @@ -129,9 +126,8 @@ fn test_submit_with_invalid_nonce() { let message = Message { event_log: mock_event_log(), proof: Proof { - block_hash: Default::default(), - tx_index: Default::default(), - data: Default::default(), + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), }, }; assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); @@ -150,12 +146,12 @@ } #[test] -fn test_submit_no_funds_to_reward_relayers() { +fn test_submit_no_funds_to_reward_relayers_just_ignore() { new_tester().execute_with(|| { let relayer: AccountId = Keyring::Bob.into(); let origin = RuntimeOrigin::signed(relayer); - // Reset balance of sovereign_account to zero so to trigger the FundsUnavailable error + // Reset balance of sovereign_account to zero first let sovereign_account = sibling_sovereign_account::<Test>(ASSET_HUB_PARAID.into()); Balances::set_balance(&sovereign_account, 0); @@ -163,15 +159,12 @@ let message = Message { event_log: mock_event_log(), proof: Proof { - block_hash: Default::default(), - tx_index: Default::default(), - data: Default::default(), + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), }, }; - assert_noop!( - InboundQueue::submit(origin.clone(), message.clone()), - TokenError::FundsUnavailable - ); + // Check that submit succeeds even when no funds are available + assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); }); } @@ -183,9 +176,8 @@ fn test_set_operating_mode() { let message = Message { event_log: mock_event_log(), proof: Proof { - block_hash: Default::default(), - tx_index: Default::default(), - data: Default::default(), + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), }, }; @@ -210,3 +202,44 @@ fn test_set_operating_mode_root_only() { ); }); } + +#[test] +fn test_submit_no_funds_to_reward_relayers_and_ed_preserved() { + new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let origin = RuntimeOrigin::signed(relayer); + + // Reset balance of sovereign account to (ED+1) first + let sovereign_account = sibling_sovereign_account::<Test>(ASSET_HUB_PARAID.into()); + Balances::set_balance(&sovereign_account, ExistentialDeposit::get() + 1); + + // Submit message successfully + let message = Message { + event_log: mock_event_log(), + proof: Proof { + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), + }, + }; + assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); + + // Check that the sovereign account balance has dropped to ED + let amount = Balances::balance(&sovereign_account); + assert_eq!(amount, ExistentialDeposit::get()); + + // Submit another message with nonce set to 2 + let mut event_log = mock_event_log(); + event_log.data[31] = 2; + let message = Message { + event_log, + proof: Proof { + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), + }, + }; + assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); + // Check that the sovereign account balance stays at ED + let amount = Balances::balance(&sovereign_account); + assert_eq!(amount, ExistentialDeposit::get()); + }); +}
sovereign account remains at ED + let amount = Balances::balance(&sovereign_account); + assert_eq!(amount, ExistentialDeposit::get()); + }); +} diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml index 4f43f2b088ea..83b6138b9cc5 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -23,7 +23,7 @@ sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-fea [dev-dependencies] hex-literal = { version = "0.4.1" } -env_logger = "0.9" +env_logger = "0.11" hex = "0.4" array-bytes = "4.1" sp-crypto-hashing = { path = "../../../../../substrate/primitives/crypto/hashing" } diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs index 8de9a1992b7b..18a5477537b7 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/src/lib.rs @@ -5,7 +5,10 @@ extern crate alloc; use frame_support::traits::tokens::Balance as BalanceT; -use snowbridge_core::outbound::Message; +use snowbridge_core::{ + outbound::{Command, Fee}, + PricingParameters, +}; use snowbridge_outbound_queue_merkle_tree::MerkleProof; sp_api::decl_runtime_apis! { @@ -13,10 +16,10 @@ { /// Generate a merkle proof for a committed message identified by `leaf_index`. /// The merkle root is stored in the block header as a - /// `\[`sp_runtime::generic::DigestItem::Other`\]` + /// `sp_runtime::generic::DigestItem::Other` fn prove_message(leaf_index: u64) -> Option<MerkleProof>; - /// Calculate the delivery fee for `message` - fn calculate_fee(message: Message) -> Option<Balance>; + /// Calculate the delivery fee for `command` + fn calculate_fee(command: Command, parameters: Option<PricingParameters<Balance>>) -> Fee<Balance>; } } diff --git a/bridges/snowbridge/pallets/outbound-queue/src/api.rs b/bridges/snowbridge/pallets/outbound-queue/src/api.rs index 44d63f1e2d23..b904819b1b18 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/api.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/api.rs @@ -4,8 +4,12 @@ use crate::{Config, MessageLeaves}; use frame_support::storage::StorageStreamIter; -use snowbridge_core::outbound::{Message, SendMessage}; +use snowbridge_core::{ + outbound::{Command, Fee, GasMeter}, + PricingParameters, +}; use snowbridge_outbound_queue_merkle_tree::{merkle_proof, MerkleProof}; +use sp_core::Get; pub fn prove_message<T>(leaf_index: u64) -> Option<MerkleProof> where T: Config, { @@ -19,12 +23,14 @@ Some(proof) } -pub fn calculate_fee<T>(message: Message) -> Option<T::Balance> +pub fn calculate_fee<T>( + command: Command, + parameters: Option<PricingParameters<T::Balance>>, +) -> Fee<T::Balance> where T: Config, { - match crate::Pallet::<T>::validate(&message) { - Ok((_, fees)) => Some(fees.total()), - _ => None, - } + let gas_used_at_most = T::GasMeter::maximum_gas_used_at_most(&command); + let parameters = parameters.unwrap_or(T::PricingParameters::get()); + crate::Pallet::<T>::calculate_fee(gas_used_at_most, parameters) } diff --git a/bridges/snowbridge/pallets/outbound-queue/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue/src/lib.rs index 5fe4e44be527..79167a85c73d 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/lib.rs @@ -47,24 +47,37 @@ //! consume on Ethereum. Using this upper bound, a final fee can be calculated. //! //! 
The fee calculation also requires the following parameters: -//! * ETH/DOT exchange rate -//! * Ether fee per unit of gas +//! * Average ETH/DOT exchange rate over some period +//! * Max fee per unit of gas that the bridge is willing to refund relayers for //! //! By design, it is expected that governance should manually update these //! parameters every few weeks using the `set_pricing_parameters` extrinsic in the //! system pallet. //! +//! This is an interim measure. Once ETH/DOT liquidity pools are available in the Polkadot network, +//! we'll use them as a source of pricing info, subject to certain safeguards. +//! //! ## Fee Computation Function //! //! ```text //! LocalFee(Message) = WeightToFee(ProcessMessageWeight(Message)) -//! RemoteFee(Message) = MaxGasRequired(Message) * FeePerGas + Reward -//! Fee(Message) = LocalFee(Message) + (RemoteFee(Message) / Ratio("ETH/DOT")) +//! RemoteFee(Message) = MaxGasRequired(Message) * Params.MaxFeePerGas + Params.Reward +//! RemoteFeeAdjusted(Message) = Params.Multiplier * (RemoteFee(Message) / Params.Ratio("ETH/DOT")) +//! Fee(Message) = LocalFee(Message) + RemoteFeeAdjusted(Message) //! ``` //! -//! By design, the computed fee is always going to conservative, to cover worst-case -//! costs of dispatch on Ethereum. In future iterations of the design, we will optimize -//! this, or provide a mechanism to asynchronously refund a portion of collected fees. +//! By design, the computed fee includes a safety factor (the `Multiplier`) to cover +//! unfavourable fluctuations in the ETH/DOT exchange rate. +//! +//! ## Fee Settlement +//! +//! On the remote side, in the gateway contract, the relayer accrues +//! +//! ```text +//! Min(GasPrice, Message.MaxFeePerGas) * GasUsed() + Message.Reward +//! ``` +//! Or in plain English, relayers are refunded for gas consumption, using a +//! price that is the minimum of the actual gas price and `Message.MaxFeePerGas`. //! //! # Extrinsics //! @@ -109,7 +122,7 @@ pub use snowbridge_outbound_queue_merkle_tree::MerkleProof; use sp_core::{H256, U256}; use sp_runtime::{ traits::{CheckedDiv, Hash}, - DigestItem, + DigestItem, Saturating, }; pub use types::{CommittedMessage, ProcessMessageOriginOf}; @@ -369,8 +382,9 @@ pub mod pallet { // downcast to u128 let fee: u128 = fee.try_into().defensive_unwrap_or(u128::MAX); - // convert to local currency + // multiply by multiplier and convert to local currency let fee = FixedU128::from_inner(fee) + .saturating_mul(params.multiplier) .checked_div(&params.exchange_rate) .expect("exchange rate is not zero; qed") .into_inner(); diff --git a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs index 36a57c2c639f..41ed2386cc31 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs @@ -69,6 +69,7 @@ impl pallet_message_queue::Config for Test { type HeapSize = HeapSize; type MaxStale = MaxStale; type ServiceWeight = ServiceWeight; + type IdleMaxServiceWeight = (); type QueuePausedQuery = (); } @@ -77,7 +78,8 @@ parameter_types! 
{ pub Parameters: PricingParameters<u128> = PricingParameters { exchange_rate: FixedU128::from_rational(1, 400), fee_per_gas: gwei(20), - rewards: Rewards { local: DOT, remote: meth(1) } + rewards: Rewards { local: DOT, remote: meth(1) }, + multiplier: FixedU128::from_rational(4, 3), }; } diff --git a/bridges/snowbridge/pallets/outbound-queue/src/test.rs b/bridges/snowbridge/pallets/outbound-queue/src/test.rs index 8ed4a318d68e..4e9ea36e24bc 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/test.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/test.rs @@ -268,28 +268,34 @@ fn encode_digest_item() { } #[test] -fn validate_messages_with_fees() { +fn test_calculate_fees_with_unit_multiplier() { new_tester().execute_with(|| { - let message = mock_message(1000); - let (_, fee) = OutboundQueue::validate(&message).unwrap(); + let gas_used: u64 = 250000; + let price_params: PricingParameters<<Test as Config>::Balance> = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 400), + fee_per_gas: 10000_u32.into(), + rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, + multiplier: FixedU128::from_rational(1, 1), + }; + let fee = OutboundQueue::calculate_fee(gas_used, price_params); assert_eq!(fee.local, 698000000); - assert_eq!(fee.remote, 2680000000000); + assert_eq!(fee.remote, 1000000); }); } #[test] -fn test_calculate_fees() { +fn test_calculate_fees_with_multiplier() { new_tester().execute_with(|| { let gas_used: u64 = 250000; - let illegal_price_params: PricingParameters<<Test as Config>::Balance> = - PricingParameters { - exchange_rate: FixedU128::from_rational(1, 400), - fee_per_gas: 10000_u32.into(), - rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, - }; - let fee = OutboundQueue::calculate_fee(gas_used, illegal_price_params); + let price_params: PricingParameters<<Test as Config>::Balance> = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 400), + fee_per_gas: 10000_u32.into(), + rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, + multiplier: FixedU128::from_rational(4, 3), + }; + let fee = OutboundQueue::calculate_fee(gas_used, price_params); assert_eq!(fee.local, 698000000); - assert_eq!(fee.remote, 1000000); + assert_eq!(fee.remote, 1333333); }); } @@ -297,13 +303,13 @@ fn test_calculate_fees_with_valid_exchange_rate_but_remote_fee_calculated_as_zero() { new_tester().execute_with(|| { let gas_used: u64 = 250000; - let illegal_price_params: PricingParameters<<Test as Config>::Balance> = - PricingParameters { - exchange_rate: FixedU128::from_rational(1, 1), - fee_per_gas: 1_u32.into(), - rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, - }; - let fee = OutboundQueue::calculate_fee(gas_used, illegal_price_params.clone()); + let price_params: PricingParameters<<Test as Config>::Balance> = PricingParameters { + exchange_rate: FixedU128::from_rational(1, 1), + fee_per_gas: 1_u32.into(), + rewards: Rewards { local: 1_u32.into(), remote: 1_u32.into() }, + multiplier: FixedU128::from_rational(1, 1), + }; + let fee = OutboundQueue::calculate_fee(gas_used, price_params.clone()); assert_eq!(fee.local, 698000000); // Though the pricing params are non-zero, the remote fee calculated here is invalid // and should be avoided diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 833885928ebc..5cd9a3f8fd68 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -161,6 +161,7 @@ pub mod pallet { type DefaultPricingParameters: Get<PricingParametersOf<Self>>; /// Cost of delivering a 
message from Ethereum + #[pallet::constant] type InboundDeliveryCost: Get>; type WeightInfo: WeightInfo; @@ -336,6 +337,7 @@ pub mod pallet { let command = Command::SetPricingParameters { exchange_rate: params.exchange_rate.into(), delivery_cost: T::InboundDeliveryCost::get().saturated_into::(), + multiplier: params.multiplier.into(), }; Self::send(PRIMARY_GOVERNANCE_CHANNEL, command, PaysFee::::No)?; diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index a711eab5d3d4..687072a49e2e 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -148,6 +148,7 @@ impl pallet_message_queue::Config for Test { type HeapSize = HeapSize; type MaxStale = MaxStale; type ServiceWeight = ServiceWeight; + type IdleMaxServiceWeight = (); type QueuePausedQuery = (); } @@ -193,7 +194,8 @@ parameter_types! { pub Parameters: PricingParameters = PricingParameters { exchange_rate: FixedU128::from_rational(1, 400), fee_per_gas: gwei(20), - rewards: Rewards { local: DOT, remote: meth(1) } + rewards: Rewards { local: DOT, remote: meth(1) }, + multiplier: FixedU128::from_rational(4, 3) }; pub const InboundDeliveryCost: u128 = 1_000_000_000; diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs deleted file mode 100644 index d2cca373e92b..000000000000 --- a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs +++ /dev/null @@ -1,259 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -use crate as ethereum_beacon_client; -use frame_support::parameter_types; -use pallet_timestamp; -use primitives::{Fork, ForkVersions}; -use sp_core::H256; -use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; - -#[cfg(not(feature = "beacon-spec-mainnet"))] -pub mod minimal { - use super::*; - - use crate::config; - use frame_support::derive_impl; - use hex_literal::hex; - use primitives::CompactExecutionHeader; - use snowbridge_core::inbound::{Log, Proof}; - use sp_runtime::BuildStorage; - use std::{fs::File, path::PathBuf}; - - type Block = frame_system::mocking::MockBlock; - - frame_support::construct_runtime!( - pub enum Test { - System: frame_system::{Pallet, Call, Storage, Event}, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event}, - } - ); - - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const SS58Prefix: u8 = 42; - } - - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] - impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type RuntimeTask = RuntimeTask; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type PalletInfo = PalletInfo; - type SS58Prefix = SS58Prefix; - type Nonce = u64; - type Block = Block; - } - - impl pallet_timestamp::Config for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = (); - type WeightInfo = (); - } - - parameter_types! 
{ - pub const ExecutionHeadersPruneThreshold: u32 = 10; - pub const ChainForkVersions: ForkVersions = ForkVersions{ - genesis: Fork { - version: [0, 0, 0, 1], // 0x00000001 - epoch: 0, - }, - altair: Fork { - version: [1, 0, 0, 1], // 0x01000001 - epoch: 0, - }, - bellatrix: Fork { - version: [2, 0, 0, 1], // 0x02000001 - epoch: 0, - }, - capella: Fork { - version: [3, 0, 0, 1], // 0x03000001 - epoch: 0, - }, - }; - } - - impl ethereum_beacon_client::Config for Test { - type RuntimeEvent = RuntimeEvent; - type ForkVersions = ChainForkVersions; - type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; - type WeightInfo = (); - } - - // Build genesis storage according to the mock runtime. - pub fn new_tester() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000)); - ext - } - - fn load_fixture(basename: &str) -> Result - where - T: for<'de> serde::Deserialize<'de>, - { - let filepath: PathBuf = - [env!("CARGO_MANIFEST_DIR"), "tests", "fixtures", basename].iter().collect(); - serde_json::from_reader(File::open(filepath).unwrap()) - } - - pub fn load_execution_header_update_fixture() -> primitives::ExecutionHeaderUpdate { - load_fixture("execution-header-update.minimal.json").unwrap() - } - - pub fn load_checkpoint_update_fixture( - ) -> primitives::CheckpointUpdate<{ config::SYNC_COMMITTEE_SIZE }> { - load_fixture("initial-checkpoint.minimal.json").unwrap() - } - - pub fn load_sync_committee_update_fixture( - ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { - load_fixture("sync-committee-update.minimal.json").unwrap() - } - - pub fn load_finalized_header_update_fixture( - ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { - load_fixture("finalized-header-update.minimal.json").unwrap() - } - - pub fn load_next_sync_committee_update_fixture( - ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { - load_fixture("next-sync-committee-update.minimal.json").unwrap() - } - - pub fn load_next_finalized_header_update_fixture( - ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { - load_fixture("next-finalized-header-update.minimal.json").unwrap() - } - - pub fn get_message_verification_payload() -> (Log, Proof) { - ( - Log { - address: hex!("ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0").into(), - topics: vec![ - hex!("1b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ad").into(), - hex!("00000000000000000000000000000000000000000000000000000000000003e8").into(), - hex!("0000000000000000000000000000000000000000000000000000000000000001").into(), - ], - data: hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000").into(), - }, - Proof { - block_hash: hex!("05aaa60b0f27cce9e71909508527264b77ee14da7b5bf915fcc4e32715333213").into(), - tx_index: 0, - data: (vec![ - hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb").to_vec(), - hex!("d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c185510").to_vec(), - 
hex!("b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646").to_vec(), - ], vec![ - hex!("f90131a0b601337b3aa10a671caa724eba641e759399979856141d3aea6b6b4ac59b889ba00c7d5dd48be9060221a02fb8fa213860b4c50d47046c8fa65ffaba5737d569e0a094601b62a1086cd9c9cb71a7ebff9e718f3217fd6e837efe4246733c0a196f63a06a4b0dd0aefc37b3c77828c8f07d1b7a2455ceb5dbfd3c77d7d6aeeddc2f7e8ca0d6e8e23142cdd8ec219e1f5d8b56aa18e456702b195deeaa210327284d42ade4a08a313d4c87023005d1ab631bbfe3f5de1e405d0e66d0bef3e033f1e5711b5521a0bf09a5d9a48b10ade82b8d6a5362a15921c8b5228a3487479b467db97411d82fa0f95cccae2a7c572ef3c566503e30bac2b2feb2d2f26eebf6d870dcf7f8cf59cea0d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c1855108080808080808080").to_vec(), - hex!("f851a0b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646a060a634b9280e3a23fb63375e7bbdd9ab07fd379ab6a67e2312bbc112195fa358808080808080808080808080808080").to_vec(), - hex!("f9030820b9030402f90300018301d6e2b9010000000000000800000000000020040008000000000000000000000000400000008000000000000000000000000000000000000000000000000000000000042010000000001000000000000000000000000000000000040000000000000000000000000000000000000000000000008000000000000000002000000000000000000000000200000000000000200000000000100000000040000001000200008000000000000200000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000f901f5f87a942ffa5ecdbe006d30397c7636d3e015eee251369ff842a0c965575a00553e094ca7c5d14f02e107c258dda06867cbf9e0e69f80e71bbcc1a000000000000000000000000000000000000000000000000000000000000003e8a000000000000000000000000000000000000000000000000000000000000003e8f9011c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000001b8a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000f858948cf6147918a5cbb672703f879f385036f8793a24e1a01449abf21e49fd025f33495e77f7b1461caefdd3d4bb646424a3f445c4576a5ba0000000000000000000000000440edffa1352b13227e8ee646f3ea37456dec701").to_vec(), - ]), - } - ) - } - - pub fn get_message_verification_header() -> CompactExecutionHeader { - CompactExecutionHeader { - parent_hash: hex!("04a7f6ab8282203562c62f38b0ab41d32aaebe2c7ea687702b463148a6429e04") - .into(), - block_number: 55, - state_root: hex!("894d968712976d613519f973a317cb0781c7b039c89f27ea2b7ca193f7befdb3") - .into(), - receipts_root: hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb") - .into(), - } - } -} - -#[cfg(feature = "beacon-spec-mainnet")] -pub mod mainnet { - use super::*; - use frame_support::derive_impl; - - type Block = frame_system::mocking::MockBlock; - use sp_runtime::BuildStorage; - - frame_support::construct_runtime!( - pub enum Test { - System: frame_system::{Pallet, Call, Storage, Event}, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event}, - } - ); - - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const SS58Prefix: u8 = 42; - } - - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] - impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type RuntimeTask = RuntimeTask; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type PalletInfo = PalletInfo; - type SS58Prefix = SS58Prefix; - type Nonce = u64; - type Block = Block; - } - - impl pallet_timestamp::Config for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = (); - type WeightInfo = (); - } - - parameter_types! { - pub const ChainForkVersions: ForkVersions = ForkVersions{ - genesis: Fork { - version: [0, 0, 16, 32], // 0x00001020 - epoch: 0, - }, - altair: Fork { - version: [1, 0, 16, 32], // 0x01001020 - epoch: 36660, - }, - bellatrix: Fork { - version: [2, 0, 16, 32], // 0x02001020 - epoch: 112260, - }, - capella: Fork { - version: [3, 0, 16, 32], // 0x03001020 - epoch: 162304, - }, - }; - pub const ExecutionHeadersPruneThreshold: u32 = 10; - } - - impl ethereum_beacon_client::Config for Test { - type RuntimeEvent = RuntimeEvent; - type ForkVersions = ChainForkVersions; - type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; - type WeightInfo = (); - } - - // Build genesis storage according to the mock runtime. - pub fn new_tester() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000)); - ext - } -} diff --git a/bridges/snowbridge/primitives/beacon/src/lib.rs b/bridges/snowbridge/primitives/beacon/src/lib.rs index 839d8e84eccf..550f28a68a8a 100644 --- a/bridges/snowbridge/primitives/beacon/src/lib.rs +++ b/bridges/snowbridge/primitives/beacon/src/lib.rs @@ -17,12 +17,12 @@ pub mod updates; mod serde_utils; pub use types::{ - BeaconHeader, CompactBeaconState, CompactExecutionHeader, ExecutionHeaderState, - ExecutionPayloadHeader, FinalizedHeaderState, Fork, ForkData, ForkVersion, ForkVersions, Mode, - PublicKey, Signature, SigningData, SyncAggregate, SyncCommittee, SyncCommitteePrepared, + AncestryProof, BeaconHeader, CompactBeaconState, ExecutionPayloadHeader, ExecutionProof, + FinalizedHeaderState, Fork, ForkData, ForkVersion, ForkVersions, Mode, PublicKey, Signature, + SigningData, SyncAggregate, SyncCommittee, SyncCommitteePrepared, VersionedExecutionPayloadHeader, }; -pub use updates::{CheckpointUpdate, ExecutionHeaderUpdate, NextSyncCommitteeUpdate, Update}; +pub use updates::{CheckpointUpdate, NextSyncCommitteeUpdate, Update}; pub use bits::decompress_sync_committee_bits; pub use bls::{ diff --git a/bridges/snowbridge/primitives/beacon/src/types.rs b/bridges/snowbridge/primitives/beacon/src/types.rs index db4feb5c9c0e..9d3ac2aad524 100644 --- a/bridges/snowbridge/primitives/beacon/src/types.rs +++ b/bridges/snowbridge/primitives/beacon/src/types.rs @@ -112,14 +112,6 @@ impl<'de> Deserialize<'de> for Signature { } } -#[derive(Copy, Clone, Default, Encode, Decode, TypeInfo, MaxEncodedLen)] -pub struct ExecutionHeaderState { - pub beacon_block_root: H256, - pub beacon_slot: u64, - pub block_hash: H256, - pub block_number: u64, -} - #[derive(Copy, Clone, Default, Encode, Decode, TypeInfo, MaxEncodedLen)] 
pub struct FinalizedHeaderState { pub beacon_block_root: H256, @@ -348,35 +340,6 @@ impl ExecutionPayloadHeader { } } -#[derive( - Default, - Encode, - Decode, - CloneNoBound, - PartialEqNoBound, - RuntimeDebugNoBound, - TypeInfo, - MaxEncodedLen, -)] -pub struct CompactExecutionHeader { - pub parent_hash: H256, - #[codec(compact)] - pub block_number: u64, - pub state_root: H256, - pub receipts_root: H256, -} - -impl From for CompactExecutionHeader { - fn from(execution_payload: ExecutionPayloadHeader) -> Self { - Self { - parent_hash: execution_payload.parent_hash, - block_number: execution_payload.block_number, - state_root: execution_payload.state_root, - receipts_root: execution_payload.receipts_root, - } - } -} - #[derive( Default, Encode, @@ -407,18 +370,6 @@ pub enum VersionedExecutionPayloadHeader { Deneb(deneb::ExecutionPayloadHeader), } -/// Convert VersionedExecutionPayloadHeader to CompactExecutionHeader -impl From for CompactExecutionHeader { - fn from(versioned_execution_header: VersionedExecutionPayloadHeader) -> Self { - match versioned_execution_header { - VersionedExecutionPayloadHeader::Capella(execution_payload_header) => - execution_payload_header.into(), - VersionedExecutionPayloadHeader::Deneb(execution_payload_header) => - execution_payload_header.into(), - } - } -} - impl VersionedExecutionPayloadHeader { pub fn hash_tree_root(&self) -> Result { match self { @@ -450,6 +401,45 @@ impl VersionedExecutionPayloadHeader { execution_payload_header.block_number, } } + + pub fn receipts_root(&self) -> H256 { + match self { + VersionedExecutionPayloadHeader::Capella(execution_payload_header) => + execution_payload_header.receipts_root, + VersionedExecutionPayloadHeader::Deneb(execution_payload_header) => + execution_payload_header.receipts_root, + } + } +} + +#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] +#[cfg_attr( + feature = "std", + derive(serde::Deserialize), + serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) +)] +pub struct ExecutionProof { + /// Header for the beacon block containing the execution payload + pub header: BeaconHeader, + /// Proof that `header` is an ancestor of a finalized header + pub ancestry_proof: Option, + /// The execution header to be verified + pub execution_header: VersionedExecutionPayloadHeader, + /// Merkle proof that execution payload is contained within `header` + pub execution_branch: Vec, +} + +#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] +#[cfg_attr( + feature = "std", + derive(serde::Deserialize), + serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) +)] +pub struct AncestryProof { + /// Merkle proof that `header` is an ancestor of `finalized_header` + pub header_branch: Vec, + /// Root of a finalized block that has already been imported into the light client + pub finalized_block_root: H256, } #[cfg(test)] @@ -578,7 +568,6 @@ pub enum Mode { } pub mod deneb { - use crate::CompactExecutionHeader; use codec::{Decode, Encode}; use frame_support::{CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound}; use scale_info::TypeInfo; @@ -630,15 +619,4 @@ pub mod deneb { pub blob_gas_used: u64, // [New in Deneb:EIP4844] pub excess_blob_gas: u64, // [New in Deneb:EIP4844] } - - impl From for CompactExecutionHeader { - fn from(execution_payload: ExecutionPayloadHeader) -> Self { - Self { - parent_hash: execution_payload.parent_hash, - block_number: execution_payload.block_number, - state_root: 
execution_payload.state_root, - receipts_root: execution_payload.receipts_root, - } - } - } } diff --git a/bridges/snowbridge/primitives/beacon/src/updates.rs b/bridges/snowbridge/primitives/beacon/src/updates.rs index c40e99f74f1c..38ea59c6dace 100644 --- a/bridges/snowbridge/primitives/beacon/src/updates.rs +++ b/bridges/snowbridge/primitives/beacon/src/updates.rs @@ -7,7 +7,7 @@ use frame_support::{CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound}; use scale_info::TypeInfo; use sp_core::H256; -use crate::types::{BeaconHeader, SyncAggregate, SyncCommittee, VersionedExecutionPayloadHeader}; +use crate::types::{BeaconHeader, SyncAggregate, SyncCommittee}; #[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] #[cfg_attr( @@ -24,26 +24,13 @@ pub struct CheckpointUpdate { pub block_roots_branch: Vec, } -impl Default for CheckpointUpdate { - fn default() -> Self { - CheckpointUpdate { - header: Default::default(), - current_sync_committee: Default::default(), - current_sync_committee_branch: Default::default(), - validators_root: Default::default(), - block_roots_root: Default::default(), - block_roots_branch: Default::default(), - } - } -} - #[derive( Default, Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo, )] #[cfg_attr( feature = "std", derive(serde::Deserialize), - serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) + serde(bound(serialize = ""), bound(deserialize = "")) )] pub struct Update { /// A recent header attesting to the finalized header, using its `state_root`. @@ -79,33 +66,3 @@ pub struct NextSyncCommitteeUpdate { pub next_sync_committee: SyncCommittee, pub next_sync_committee_branch: Vec, } - -#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] -#[cfg_attr( - feature = "std", - derive(serde::Deserialize), - serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) -)] -pub struct ExecutionHeaderUpdate { - /// Header for the beacon block containing the execution payload - pub header: BeaconHeader, - /// Proof that `header` is an ancestor of a finalized header - pub ancestry_proof: Option, - /// Execution header to be imported - pub execution_header: VersionedExecutionPayloadHeader, - /// Merkle proof that execution payload is contained within `header` - pub execution_branch: Vec, -} - -#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] -#[cfg_attr( - feature = "std", - derive(serde::Deserialize), - serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) -)] -pub struct AncestryProof { - /// Merkle proof that `header` is an ancestor of `finalized_header` - pub header_branch: Vec, - /// Root of a finalized block that has already been imported into the light client - pub finalized_block_root: H256, -} diff --git a/bridges/snowbridge/primitives/core/src/inbound.rs b/bridges/snowbridge/primitives/core/src/inbound.rs index e7b0569d0160..b4a6b96bfbf0 100644 --- a/bridges/snowbridge/primitives/core/src/inbound.rs +++ b/bridges/snowbridge/primitives/core/src/inbound.rs @@ -7,6 +7,7 @@ use alloc::vec::Vec; use codec::{Decode, Encode}; use frame_support::PalletError; use scale_info::TypeInfo; +use snowbridge_beacon_primitives::{BeaconHeader, ExecutionProof}; use sp_core::{H160, H256}; use sp_runtime::RuntimeDebug; @@ -26,6 +27,8 @@ pub enum VerificationError { InvalidLog, /// Unable to verify the transaction receipt with the provided proof InvalidProof, + /// Unable to verify the execution 
header with ancestry proof + InvalidExecutionProof(#[codec(skip)] &'static str), } pub type MessageNonce = u64; @@ -66,10 +69,15 @@ impl Log { /// Inclusion proof for a transaction receipt #[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] pub struct Proof { - // The block hash of the block in which the receipt was included. - pub block_hash: H256, - // The index of the transaction (and receipt) within the block. - pub tx_index: u32, // Proof keys and values (receipts tree) - pub data: (Vec<Vec<u8>>, Vec<Vec<u8>>), + pub receipt_proof: (Vec<Vec<u8>>, Vec<Vec<u8>>), + // Proof that an execution header was finalized by the beacon chain + pub execution_proof: ExecutionProof, +} + +#[derive(Clone, RuntimeDebug)] +pub struct InboundQueueFixture { + pub message: Message, + pub finalized_header: BeaconHeader, + pub block_roots_root: H256, } diff --git a/bridges/snowbridge/primitives/core/src/outbound.rs b/bridges/snowbridge/primitives/core/src/outbound.rs index efd993dfc488..7c399844e040 100644 --- a/bridges/snowbridge/primitives/core/src/outbound.rs +++ b/bridges/snowbridge/primitives/core/src/outbound.rs @@ -137,6 +137,8 @@ mod v1 { exchange_rate: UD60x18, // Cost of delivering a message from Ethereum to BridgeHub, in ROC/KSM/DOT delivery_cost: u128, + // Fee multiplier + multiplier: UD60x18, }, } @@ -204,10 +206,11 @@ mod v1 { Token::Uint(U256::from(*transfer_asset_xcm)), Token::Uint(*register_token), ])]), - Command::SetPricingParameters { exchange_rate, delivery_cost } => + Command::SetPricingParameters { exchange_rate, delivery_cost, multiplier } => ethabi::encode(&[Token::Tuple(vec![ Token::Uint(exchange_rate.clone().into_inner()), Token::Uint(U256::from(*delivery_cost)), + Token::Uint(multiplier.clone().into_inner()), ])]), } } @@ -274,7 +277,8 @@ mod v1 { } } -#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(PartialEq))] /// Fee for delivering message pub struct Fee<Balance> where @@ -347,12 +351,13 @@ pub trait GasMeter { /// the command within the message const MAXIMUM_BASE_GAS: u64; + /// Total gas consumed at most, including verification & dispatch fn maximum_gas_used_at_most(command: &Command) -> u64 { Self::MAXIMUM_BASE_GAS + Self::maximum_dispatch_gas_used_at_most(command) } - /// Measures the maximum amount of gas a command payload will require to dispatch, AFTER - /// validation & verification. + /// Measures the maximum amount of gas a command payload will require to *dispatch*, NOT + /// including validation & verification. 
fn maximum_dispatch_gas_used_at_most(command: &Command) -> u64; } diff --git a/bridges/snowbridge/primitives/core/src/pricing.rs b/bridges/snowbridge/primitives/core/src/pricing.rs index bdd7cbd3b5c1..b089c7efe8b9 100644 --- a/bridges/snowbridge/primitives/core/src/pricing.rs +++ b/bridges/snowbridge/primitives/core/src/pricing.rs @@ -12,6 +12,8 @@ pub struct PricingParameters { pub rewards: Rewards, /// Ether (wei) fee per gas unit pub fee_per_gas: U256, + /// Fee multiplier + pub multiplier: FixedU128, } #[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)] @@ -42,6 +44,9 @@ where if self.rewards.remote.is_zero() { return Err(InvalidPricingParameters) } + if self.multiplier == FixedU128::zero() { + return Err(InvalidPricingParameters) + } Ok(()) } } diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index bd1108b125ae..20bf65e8ef10 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -14,7 +14,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } smallvec = "1.11.0" diff --git a/bridges/snowbridge/runtime/test-common/src/lib.rs b/bridges/snowbridge/runtime/test-common/src/lib.rs index 7455adf76170..3e2de0e481b8 100644 --- a/bridges/snowbridge/runtime/test-common/src/lib.rs +++ b/bridges/snowbridge/runtime/test-common/src/lib.rs @@ -467,7 +467,6 @@ pub fn ethereum_extrinsic( let initial_checkpoint = make_checkpoint(); let update = make_finalized_header_update(); let sync_committee_update = make_sync_committee_update(); - let execution_header_update = make_execution_header_update(); let alice = Alice; let alice_account = alice.to_account_id(); @@ -494,22 +493,12 @@ pub fn ethereum_extrinsic( } .into(); - let execution_header_call: ::RuntimeCall = - snowbridge_pallet_ethereum_client::Call::::submit_execution_header { - update: Box::new(*execution_header_update), - } - .into(); - let update_outcome = construct_and_apply_extrinsic(alice, update_call.into()); assert_ok!(update_outcome); let sync_committee_outcome = construct_and_apply_extrinsic(alice, update_sync_committee_call.into()); assert_ok!(sync_committee_outcome); - - let execution_header_outcome = - construct_and_apply_extrinsic(alice, execution_header_call.into()); - assert_ok!(execution_header_outcome); }); } @@ -548,7 +537,6 @@ pub fn ethereum_to_polkadot_message_extrinsics_work( .execute_with(|| { let initial_checkpoint = make_checkpoint(); let sync_committee_update = make_sync_committee_update(); - let execution_header_update = make_execution_header_update(); let alice = Alice; let alice_account = alice.to_account_id(); @@ -569,18 +557,8 @@ pub fn ethereum_to_polkadot_message_extrinsics_work( } .into(); - let execution_header_call: ::RuntimeCall = - snowbridge_pallet_ethereum_client::Call::::submit_execution_header { - update: Box::new(*execution_header_update), - } - .into(); - let sync_committee_outcome = construct_and_apply_extrinsic(alice, update_sync_committee_call.into()); assert_ok!(sync_committee_outcome); - - let execution_header_outcome = - 
construct_and_apply_extrinsic(alice, execution_header_call.into()); - assert_ok!(execution_header_outcome); }); } diff --git a/bridges/snowbridge/scripts/contribute-upstream.sh b/bridges/snowbridge/scripts/contribute-upstream.sh index 32005b770ecf..529057c3f26f 100755 --- a/bridges/snowbridge/scripts/contribute-upstream.sh +++ b/bridges/snowbridge/scripts/contribute-upstream.sh @@ -79,4 +79,10 @@ git fetch parity master git checkout parity/master -- .github git add -- .github +git commit -m "cleanup branch" + +# Fetch the latest from parity master +echo "Fetching latest from Parity master. Resolve merge conflicts, if there are any." +git fetch parity master +git merge parity/master echo "OK" diff --git a/bridges/testing/environments/rococo-westend/rococo.zndsl b/bridges/testing/environments/rococo-westend/rococo.zndsl index 5b49c7c632fa..a75286445a24 100644 --- a/bridges/testing/environments/rococo-westend/rococo.zndsl +++ b/bridges/testing/environments/rococo-westend/rococo.zndsl @@ -1,7 +1,7 @@ -Description: Check if the with-Westend GRANPDA pallet was initialized at Rococo BH +Description: Check if the with-Westend GRANDPA pallet was initialized at Rococo BH Network: ./bridge_hub_rococo_local_network.toml Creds: config -# relay is already started - let's wait until with-Westend GRANPDA pallet is initialized at Rococo +# relay is already started - let's wait until with-Westend GRANDPA pallet is initialized at Rococo bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/best-finalized-header-at-bridged-chain.js with "Westend,0" within 400 seconds diff --git a/bridges/testing/environments/rococo-westend/westend.zndsl b/bridges/testing/environments/rococo-westend/westend.zndsl index 07968838852f..21d4ebf3b05b 100644 --- a/bridges/testing/environments/rococo-westend/westend.zndsl +++ b/bridges/testing/environments/rococo-westend/westend.zndsl @@ -1,6 +1,6 @@ -Description: Check if the with-Rococo GRANPDA pallet was initialized at Westend BH +Description: Check if the with-Rococo GRANDPA pallet was initialized at Westend BH Network: ./bridge_hub_westend_local_network.toml Creds: config -# relay is already started - let's wait until with-Rococo GRANPDA pallet is initialized at Westend +# relay is already started - let's wait until with-Rococo GRANDPA pallet is initialized at Westend bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds diff --git a/bridges/testing/framework/utils/generate_hex_encoded_call/index.js b/bridges/testing/framework/utils/generate_hex_encoded_call/index.js index 30f89d754ceb..c8e361b25a9c 100644 --- a/bridges/testing/framework/utils/generate_hex_encoded_call/index.js +++ b/bridges/testing/framework/utils/generate_hex_encoded_call/index.js @@ -126,36 +126,36 @@ if (!process.argv[2] || !process.argv[3]) { } const type = process.argv[2]; -const rpcEnpoint = process.argv[3]; +const rpcEndpoint = process.argv[3]; const output = process.argv[4]; const inputArgs = process.argv.slice(5, process.argv.length); console.log(`Generating hex-encoded call data for:`); console.log(` type: ${type}`); -console.log(` rpcEnpoint: ${rpcEnpoint}`); +console.log(` rpcEndpoint: ${rpcEndpoint}`); console.log(` output: ${output}`); console.log(` inputArgs: ${inputArgs}`); switch (type) { case 'remark-with-event': - remarkWithEvent(rpcEnpoint, output); + remarkWithEvent(rpcEndpoint, output); break; case 'add-exporter-config': - addExporterConfig(rpcEnpoint, output, inputArgs[0], 
inputArgs[1]); + addExporterConfig(rpcEndpoint, output, inputArgs[0], inputArgs[1]); break; case 'remove-exporter-config': - removeExporterConfig(rpcEnpoint, output, inputArgs[0], inputArgs[1]); + removeExporterConfig(rpcEndpoint, output, inputArgs[0], inputArgs[1]); break; case 'add-universal-alias': - addUniversalAlias(rpcEnpoint, output, inputArgs[0], inputArgs[1]); + addUniversalAlias(rpcEndpoint, output, inputArgs[0], inputArgs[1]); break; case 'add-reserve-location': - addReserveLocation(rpcEnpoint, output, inputArgs[0]); + addReserveLocation(rpcEndpoint, output, inputArgs[0]); break; case 'force-create-asset': - forceCreateAsset(rpcEnpoint, output, inputArgs[0], inputArgs[1], inputArgs[2], inputArgs[3]); + forceCreateAsset(rpcEndpoint, output, inputArgs[0], inputArgs[1], inputArgs[2], inputArgs[3]); break; case 'force-xcm-version': - forceXcmVersion(rpcEnpoint, output, inputArgs[0], inputArgs[1]); + forceXcmVersion(rpcEndpoint, output, inputArgs[0], inputArgs[1]); break; case 'check': console.log(`Checking nodejs installation, if you see this everything is ready!`); diff --git a/bridges/testing/run-tests.sh b/bridges/testing/run-tests.sh index 6149d9912653..fd12b57f5334 100755 --- a/bridges/testing/run-tests.sh +++ b/bridges/testing/run-tests.sh @@ -30,7 +30,7 @@ done export POLKADOT_SDK_PATH=`realpath $(dirname "$0")/../..` export BRIDGE_TESTS_FOLDER=$POLKADOT_SDK_PATH/bridges/testing/tests -# set pathc to binaries +# set path to binaries if [ "$ZOMBIENET_DOCKER_PATHS" -eq 1 ]; then export POLKADOT_BINARY=/usr/local/bin/polkadot export POLKADOT_PARACHAIN_BINARY=/usr/local/bin/polkadot-parachain diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh index 7d5b8d927366..3a604b3876d9 100755 --- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh +++ b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh @@ -24,7 +24,7 @@ echo -e "Sleeping 90s before starting relayer ...\n" sleep 90 ${BASH_SOURCE%/*}/../../environments/rococo-westend/start_relayer.sh $rococo_dir $westend_dir relayer_pid -# Sometimes the relayer syncs multiple parachain heads in the begining leading to test failures. +# Sometimes the relayer syncs multiple parachain heads in the beginning leading to test failures. # See issue: https://github.com/paritytech/parity-bridges-common/issues/2838. # TODO: Remove this sleep after the issue is fixed. 
echo -e "Sleeping 180s before runing the tests ...\n" diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index 0e911b9f3abf..42f7342d1a53 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] parking_lot = "0.12.1" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" tracing = "0.1.25" # Substrate @@ -34,7 +34,7 @@ cumulus-client-network = { path = "../network" } cumulus-primitives-core = { path = "../../primitives/core" } [dev-dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" # Substrate sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index e815e89d8ce3..70dd67cb9a00 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.28" tracing = "0.1.37" @@ -41,7 +41,7 @@ substrate-prometheus-endpoint = { path = "../../../../substrate/utils/prometheus cumulus-client-consensus-common = { path = "../common" } cumulus-relay-chain-interface = { path = "../../relay-chain-interface" } cumulus-client-consensus-proposer = { path = "../proposer" } -cumulus-client-parachain-inherent = { path = "../../../client/parachain-inherent" } +cumulus-client-parachain-inherent = { path = "../../parachain-inherent" } cumulus-primitives-aura = { path = "../../../primitives/aura" } cumulus-primitives-core = { path = "../../../primitives/core" } cumulus-client-collator = { path = "../../collator" } diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 21dbc7bc5991..03341ad1d3d0 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -49,7 +49,7 @@ use polkadot_node_subsystem::messages::{ CollationGenerationMessage, RuntimeApiMessage, RuntimeApiRequest, }; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{CollatorPair, Id as ParaId, OccupiedCoreAssumption}; +use polkadot_primitives::{CollatorPair, CoreIndex, Id as ParaId, OccupiedCoreAssumption}; use futures::{channel::oneshot, prelude::*}; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; @@ -184,7 +184,15 @@ where while let Some(relay_parent_header) = import_notifications.next().await { let relay_parent = relay_parent_header.hash(); - if !is_para_scheduled(relay_parent, params.para_id, &mut params.overseer_handle).await { + // TODO: Currently we use just the first core here, but for elastic scaling + // we iterate and build on all of the cores returned. 
+ let core_index = if let Some(core_index) = + cores_scheduled_for_para(relay_parent, params.para_id, &mut params.overseer_handle) + .await + .get(0) + { + *core_index + } else { tracing::trace!( target: crate::LOG_TARGET, ?relay_parent, @@ -193,7 +201,7 @@ where ); continue - } + }; let max_pov_size = match params .relay_client @@ -396,6 +404,7 @@ where parent_head: parent_header.encode().into(), validation_code_hash, result_sender: None, + core_index, }, ), "SubmitCollation", @@ -480,14 +489,12 @@ async fn max_ancestry_lookback( } } -// Checks if there exists a scheduled core for the para at the provided relay parent. -// -// Falls back to `false` in case of an error. -async fn is_para_scheduled( +// Return all the cores assigned to the para at the provided relay parent. +async fn cores_scheduled_for_para( relay_parent: PHash, para_id: ParaId, overseer_handle: &mut OverseerHandle, -) -> bool { +) -> Vec { let (tx, rx) = oneshot::channel(); let request = RuntimeApiRequest::AvailabilityCores(tx); overseer_handle @@ -503,7 +510,7 @@ async fn is_para_scheduled( ?relay_parent, "Failed to query availability cores runtime API", ); - return false + return Vec::new() }, Err(oneshot::Canceled) => { tracing::error!( @@ -511,9 +518,19 @@ async fn is_para_scheduled( ?relay_parent, "Sender for availability cores runtime request dropped", ); - return false + return Vec::new() }, }; - cores.iter().any(|core| core.para_id() == Some(para_id)) + cores + .iter() + .enumerate() + .filter_map(|(index, core)| { + if core.para_id() == Some(para_id) { + Some(CoreIndex(index as u32)) + } else { + None + } + }) + .collect() } diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 5a014b10e35f..fb4a85ad1226 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } dyn-clone = "1.0.16" futures = "0.3.28" diff --git a/cumulus/client/consensus/common/src/level_monitor.rs b/cumulus/client/consensus/common/src/level_monitor.rs index 270e3f57ae5a..fb4b0498f688 100644 --- a/cumulus/client/consensus/common/src/level_monitor.rs +++ b/cumulus/client/consensus/common/src/level_monitor.rs @@ -158,7 +158,7 @@ where /// the limit passed to the constructor. /// /// If the given level is found to have a number of blocks greater than or equal the limit - /// then the limit is enforced by chosing one (or more) blocks to remove. + /// then the limit is enforced by choosing one (or more) blocks to remove. /// /// The removal strategy is driven by the block freshness. 
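/// (Editorial example: with a level limit of 2 and blocks B10, B11 and B12 at
/// the same level, one block, the stalest of the three by freshness, would be
/// chosen for removal to bring the level back under the limit.)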
/// diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index bfb95ae388ae..7816d3a4c40a 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -832,7 +832,7 @@ fn restore_limit_monitor() { .collect::>(); // Scenario before limit application (with B11 imported as best) - // Import order (freshess): B00, B10, B11, B12, B20, B21 + // Import order (freshness): B00, B10, B11, B12, B20, B21 // // B00 --+-- B10 --+-- B20 // | +-- B21 diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml index b37232bb4485..42ca4e06f8f4 100644 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ b/cumulus/client/consensus/proposer/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] anyhow = "1.0" -async-trait = "0.1.74" +async-trait = "0.1.79" thiserror = { workspace = true } # Substrate diff --git a/cumulus/client/consensus/relay-chain/Cargo.toml b/cumulus/client/consensus/relay-chain/Cargo.toml index 3d06d6b89ef7..cb32b9804576 100644 --- a/cumulus/client/consensus/relay-chain/Cargo.toml +++ b/cumulus/client/consensus/relay-chain/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" futures = "0.3.28" parking_lot = "0.12.1" tracing = "0.1.37" diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index 995ef606d270..1210975ef690 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.28" futures-timer = "3.0.2" diff --git a/cumulus/client/network/src/tests.rs b/cumulus/client/network/src/tests.rs index d986635f961c..3f5757d5eac1 100644 --- a/cumulus/client/network/src/tests.rs +++ b/cumulus/client/network/src/tests.rs @@ -144,7 +144,7 @@ impl RelayChainInterface for DummyRelayChainInterface { persisted_validation_data_hash: PHash::random(), pov_hash: PHash::random(), erasure_root: PHash::random(), - signature: sp_core::sr25519::Signature([0u8; 64]).into(), + signature: sp_core::sr25519::Signature::default().into(), validation_code_hash: ValidationCodeHash::from(PHash::random()), }, commitments: CandidateCommitments { @@ -325,7 +325,7 @@ async fn make_gossip_message_and_header( persisted_validation_data_hash: PHash::random(), pov_hash: PHash::random(), erasure_root: PHash::random(), - signature: sp_core::sr25519::Signature([0u8; 64]).into(), + signature: sp_core::sr25519::Signature::default().into(), para_head: polkadot_parachain_primitives::primitives::HeadData(header.encode()).hash(), validation_code_hash: ValidationCodeHash::from(PHash::random()), }, @@ -516,7 +516,7 @@ async fn check_statement_seconded() { persisted_validation_data_hash: PHash::random(), pov_hash: PHash::random(), erasure_root: PHash::random(), - signature: sp_core::sr25519::Signature([0u8; 64]).into(), + signature: sp_core::sr25519::Signature::default().into(), validation_code_hash: ValidationCodeHash::from(PHash::random()), }, }, diff --git a/cumulus/client/parachain-inherent/Cargo.toml b/cumulus/client/parachain-inherent/Cargo.toml index 33718fa0578c..6dbe65899bab 100644 --- a/cumulus/client/parachain-inherent/Cargo.toml +++ 
b/cumulus/client/parachain-inherent/Cargo.toml @@ -7,9 +7,9 @@ description = "Inherent that needs to be present in every parachain block. Conta license = "Apache-2.0" [dependencies] -async-trait = "0.1.73" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } -scale-info = { version = "2.10.0", features = ["derive"] } +scale-info = { version = "2.11.1", features = ["derive"] } tracing = { version = "0.1.37" } # Substrate diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index 375a57a87c2a..571935620d6d 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -32,7 +32,7 @@ polkadot-primitives = { path = "../../../polkadot/primitives" } # Cumulus cumulus-primitives-core = { path = "../../primitives/core" } cumulus-relay-chain-interface = { path = "../relay-chain-interface" } -async-trait = "0.1.74" +async-trait = "0.1.79" [dev-dependencies] tokio = { version = "1.32.0", features = ["macros"] } diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index 32aba6c8993a..0ca21749c3eb 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -18,7 +18,7 @@ //! //! A parachain needs to build PoVs that are send to the relay chain to progress. These PoVs are //! erasure encoded and one piece of it is stored by each relay chain validator. As the relay chain -//! decides on which PoV per parachain to include and thus, to progess the parachain it can happen +//! decides on which PoV per parachain to include and thus, to progress the parachain it can happen //! that the block corresponding to this PoV isn't propagated in the parachain network. This can //! have several reasons, either a malicious collator that managed to include its own PoV and //! doesn't want to share it with the rest of the network or maybe a collator went down before it @@ -338,8 +338,8 @@ where let mut blocks_to_delete = vec![hash]; while let Some(delete) = blocks_to_delete.pop() { - if let Some(childs) = self.waiting_for_parent.remove(&delete) { - blocks_to_delete.extend(childs.iter().map(BlockT::hash)); + if let Some(children) = self.waiting_for_parent.remove(&delete) { + blocks_to_delete.extend(children.iter().map(BlockT::hash)); } } self.clear_waiting_recovery(&hash); @@ -448,7 +448,7 @@ where /// Import the given `block`. /// - /// This will also recursivley drain `waiting_for_parent` and import them as well. + /// This will also recursively drain `waiting_for_parent` and import them as well. fn import_block(&mut self, block: Block) { let mut blocks = VecDeque::new(); @@ -495,7 +495,7 @@ where tracing::debug!( target: LOG_TARGET, block_hash = ?hash, - "Cound not recover. Block was never announced as candidate" + "Could not recover. 
Block was never announced as candidate" ); return }, diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index aa16230cd8af..7629b6c631a3 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" futures = "0.3.28" futures-timer = "3.0.2" diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index 6e652b892104..6df9847252fe 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -20,7 +20,7 @@ sp-state-machine = { path = "../../../substrate/primitives/state-machine" } sc-client-api = { path = "../../../substrate/client/api" } futures = "0.3.28" -async-trait = "0.1.74" +async-trait = "0.1.79" thiserror = { workspace = true } jsonrpsee-core = "0.22" parity-scale-codec = "3.6.4" diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 98240c92adab..6860b42a5078 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -49,6 +49,6 @@ cumulus-primitives-core = { path = "../../primitives/core" } array-bytes = "6.1" tracing = "0.1.37" -async-trait = "0.1.74" +async-trait = "0.1.79" futures = "0.3.28" parking_lot = "0.12.1" diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index ab56b62c4ca5..aa5e67e453f6 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -14,7 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use std::pin::Pin; +use std::{ + collections::{BTreeMap, VecDeque}, + pin::Pin, +}; use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use cumulus_relay_chain_rpc_interface::RelayChainRpcClient; @@ -23,8 +26,7 @@ use polkadot_core_primitives::{Block, BlockNumber, Hash, Header}; use polkadot_overseer::{ChainApiBackend, RuntimeApiSubsystemClient}; use polkadot_primitives::{ async_backing::{AsyncBackingParams, BackingState}, - slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, + slashing, ApprovalVotingParams, CoreIndex, NodeFeatures, }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; use sc_client_api::AuxStore; @@ -442,6 +444,13 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { async fn node_features(&self, at: Hash) -> Result { Ok(self.rpc_client.parachain_host_node_features(at).await?) } + + async fn claim_queue( + &self, + at: Hash, + ) -> Result>, ApiError> { + Ok(self.rpc_client.parachain_host_claim_queue(at).await?) 
diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index 4bccca59fe3e..6aea043713d8 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -55,6 +55,7 @@ fn build_authority_discovery_service( prometheus_registry: Option, ) -> AuthorityDiscoveryService { let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; + let auth_disc_public_addresses = config.network.public_addresses.clone(); let authority_discovery_role = sc_authority_discovery::Role::Discover; let dht_event_stream = network.event_stream("authority-discovery").filter_map(|e| async move { match e { @@ -65,6 +66,7 @@ fn build_authority_discovery_service( let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( sc_authority_discovery::WorkerConfig { publish_non_global_ips: auth_disc_publish_non_global_ips, + public_addresses: auth_disc_public_addresses, // Require that authority discovery records are signed. strict_record_validation: true, ..Default::default() diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 801712b1ad15..149816772895 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -35,7 +35,7 @@ futures-timer = "3.0.2" parity-scale-codec = "3.6.4" jsonrpsee = { version = "0.22", features = ["ws-client"] } tracing = "0.1.37" -async-trait = "0.1.74" +async-trait = "0.1.79" url = "2.4.0" serde_json = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } diff --git a/cumulus/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/cumulus/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index b716feef1c99..48d35dd3a55e 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -293,7 +293,8 @@ impl ReconnectingWebsocketWorker { /// listeners. If an error occurs during sending, the receiver has been closed and we remove /// the sender from the list. /// - Find a new valid RPC server to connect to in case the websocket connection is terminated. - /// If the worker is not able to connec to an RPC server from the list, the worker shuts down. + /// If the worker is not able to connect to an RPC server from the list, the worker shuts + /// down. 
pub async fn run(mut self) { let mut pending_requests = FuturesUnordered::new(); diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index 6a6f015c9b8c..a1915ca7ea9f 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -24,6 +24,7 @@ use jsonrpsee::{ }; use serde::de::DeserializeOwned; use serde_json::Value as JsonValue; +use std::collections::VecDeque; use tokio::sync::mpsc::Sender as TokioSender; use parity_scale_codec::{Decode, Encode}; @@ -31,13 +32,12 @@ use parity_scale_codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain::{ async_backing::{AsyncBackingParams, BackingState}, - slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, - Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, OccupiedCoreAssumption, - PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + slashing, ApprovalVotingParams, BlockNumber, CandidateCommitments, CandidateEvent, + CandidateHash, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, + InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -647,6 +647,14 @@ impl RelayChainRpcClient { .await } + pub async fn parachain_host_claim_queue( + &self, + at: RelayHash, + ) -> Result<BTreeMap<CoreIndex, VecDeque<ParaId>>, RelayChainError> { + self.call_remote_runtime_function("ParachainHost_claim_queue", at, None::<()>) + .await + } + pub async fn validation_code_hash( &self, at: RelayHash, diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index 2bafbee951a9..e03e20fe5b41 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -30,6 +30,7 @@ sp-consensus = { path = "../../../substrate/primitives/consensus/common" } sp-core = { path = "../../../substrate/primitives/core" } sp-runtime = { path = "../../../substrate/primitives/runtime" } sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool" } +sp-io = { path = "../../../substrate/primitives/io" } # Polkadot polkadot-primitives = { path = "../../../polkadot/primitives" } diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index 950e59aff24e..91e884d6f7ec 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -54,6 +54,15 @@ use std::{sync::Arc, time::Duration}; pub use cumulus_primitives_proof_size_hostfunction::storage_proof_size; +/// Host functions that should be used in parachain nodes. +/// +/// Contains the standard substrate host functions, as well as a +/// host function to enable PoV-reclaim on parachain nodes. +pub type ParachainHostFunctions = ( + cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions, + sp_io::SubstrateHostFunctions, +); + // Given the sporadic nature of the explicit recovery operation and the // possibility to retry infinite times this value is more than enough. // In practice here we expect no more than one queued message.
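
The `ParachainHostFunctions` alias introduced above is meant to be handed to the node's Wasm executor so the runtime sees both the standard Substrate host functions and the PoV-reclaim `storage_proof_size` host function. A sketch of that wiring (assumed usage; the builder defaults are not specified by the patch):

use cumulus_client_service::ParachainHostFunctions;
use sc_executor::WasmExecutor;

// Executor type parameterized over the parachain host functions.
type ParachainExecutor = WasmExecutor<ParachainHostFunctions>;

fn new_executor() -> ParachainExecutor {
	WasmExecutor::builder().build()
}
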
diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index 9eb5ad558459..b5e2e0715303 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/pallets/collator-selection/Cargo.toml b/cumulus/pallets/collator-selection/Cargo.toml index 1d244d10f2fb..b9970d3969f3 100644 --- a/cumulus/pallets/collator-selection/Cargo.toml +++ b/cumulus/pallets/collator-selection/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] log = { workspace = true } codec = { default-features = false, features = ["derive"], package = "parity-scale-codec", version = "3.0.0" } rand = { version = "0.8.5", features = ["std_rng"], default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-staking = { path = "../../../substrate/primitives/staking", default-features = false } diff --git a/cumulus/pallets/dmp-queue/Cargo.toml b/cumulus/pallets/dmp-queue/Cargo.toml index 9dfe68e2529a..33fb79899305 100644 --- a/cumulus/pallets/dmp-queue/Cargo.toml +++ b/cumulus/pallets/dmp-queue/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 54a5ef4097c5..82032e100b34 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -16,7 +16,7 @@ environmental = { version = "1.1.4", default-features = false } impl-trait-for-tuples = "0.2.1" log = { workspace = true } trie-db = { version = "0.28.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 7c58f6b96a56..adc5c501bba5 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -467,7 +467,7 @@ pub mod pallet { // One complication here is that the `host_configuration` is updated by an inherent // and those are processed after the block initialization 
phase. Therefore, we have to // be content only with the configuration as per the previous block. That means that - // the configuration can be either stale (or be abscent altogether in case of the + // the configuration can be either stale (or be absent altogether in case of the // beginning of the chain). // // In order to mitigate this, we do the following. At the time, we are only concerned diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index 7920f31a6a04..e59d49a68d1a 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -126,6 +126,7 @@ impl pallet_message_queue::Config for Test { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MaxWeight; + type IdleMaxServiceWeight = (); type WeightInfo = (); } diff --git a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs index 94aba87f98c9..1d6254e617ff 100644 --- a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs +++ b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs @@ -91,7 +91,7 @@ pub enum Error { DmqMqcHead(ReadEntryErr), /// Relay dispatch queue cannot be extracted. RelayDispatchQueueRemainingCapacity(ReadEntryErr), - /// The hrmp inress channel index cannot be extracted. + /// The hrmp ingress channel index cannot be extracted. HrmpIngressChannelIndex(ReadEntryErr), /// The hrmp egress channel index cannot be extracted. HrmpEgressChannelIndex(ReadEntryErr), diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml index 6bce7344d440..2b57e200b803 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index 6c16bd306aa1..c2a30c006848 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index 514af2c4c1a3..bb9d8eaaa108 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate 
frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index 824989480c41..d28f08fbf45c 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -465,7 +465,7 @@ impl<T: Config> Pallet<T> { // Max message size refers to aggregates, or pages. Not to individual fragments. let max_message_size = channel_info.max_message_size as usize; let format_size = format.encoded_size(); - // We check the encoded fragment length plus the format size agains the max message size + // We check the encoded fragment length plus the format size against the max message size // because the format is concatenated if a new page is needed. let size_to_check = encoded_fragment .len() diff --git a/cumulus/pallets/xcmp-queue/src/migration.rs b/cumulus/pallets/xcmp-queue/src/migration.rs index c7fa61a3e3f0..1702cd70bc2f 100644 --- a/cumulus/pallets/xcmp-queue/src/migration.rs +++ b/cumulus/pallets/xcmp-queue/src/migration.rs @@ -20,7 +20,7 @@ use crate::{Config, OverweightIndex, Pallet, QueueConfig, QueueConfigData, DEFAU use cumulus_primitives_core::XcmpMessageFormat; use frame_support::{ pallet_prelude::*, - traits::{EnqueueMessage, OnRuntimeUpgrade, StorageVersion}, + traits::{EnqueueMessage, StorageVersion, UncheckedOnRuntimeUpgrade}, weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight}, }; @@ -96,7 +96,7 @@ pub mod v2 { /// 2D weights). pub struct UncheckedMigrationToV2<T: Config>(PhantomData<T>); - impl<T: Config> OnRuntimeUpgrade for UncheckedMigrationToV2<T> { + impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrationToV2<T> { #[allow(deprecated)] fn on_runtime_upgrade() -> Weight { let translate = |pre: v1::QueueConfigData| -> v2::QueueConfigData { @@ -187,7 +187,7 @@ pub mod v3 { /// Migrates the pallet storage to v3. pub struct UncheckedMigrationToV3<T: Config>(PhantomData<T>); - impl<T: Config> OnRuntimeUpgrade for UncheckedMigrationToV3<T> { + impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrationToV3<T> { fn on_runtime_upgrade() -> Weight { #[frame_support::storage_alias] type Overweight<T: Config> = 
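
The swap from `OnRuntimeUpgrade` to `UncheckedOnRuntimeUpgrade` in these hunks reflects that the inner migrations are meant to run only behind frame-support's versioned wrapper, which owns the storage-version checks. A hedged sketch of the usual wrapping (the alias name and version numbers here are illustrative, not taken from the patch):

use crate::Pallet;
use frame_support::migrations::VersionedMigration;

// The wrapper runs the unchecked inner migration only when the on-chain
// storage version equals the first constant, then bumps it to the second.
pub type MigrationToV3<T> = VersionedMigration<
	2,
	3,
	v3::UncheckedMigrationToV3<T>,
	Pallet<T>,
	<T as frame_system::Config>::DbWeight,
>;
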
@@ -266,7 +266,7 @@ pub mod v4 { /// thresholds to at least the default values. pub struct UncheckedMigrationToV4<T: Config>(PhantomData<T>); - impl<T: Config> OnRuntimeUpgrade for UncheckedMigrationToV4<T> { + impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrationToV4<T> { fn on_runtime_upgrade() -> Weight { let translate = |pre: v2::QueueConfigData| -> QueueConfigData { let pre_default = v2::QueueConfigData::default(); @@ -315,6 +315,7 @@ pub mod v4 { mod tests { use super::*; use crate::mock::{new_test_ext, Test}; + use frame_support::traits::OnRuntimeUpgrade; #[test] #[allow(deprecated)] diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 294f978f6ad5..9d9a723cf8b5 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -175,6 +175,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } pub type XcmRouter = ( diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index 654275eade81..66a705a40869 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -27,7 +27,8 @@ "/dns/mine14.rotko.net/tcp/34524/ws/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30511/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU", - "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30513/wss/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU" + "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30513/wss/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU", + "/dns/boot-kusama-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWSwaeFs6FNgpgh54fdoxSDAA4nJNaPE3PAcse2GRrG7b3" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index 454060d2a87a..16caa52ba913 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -27,7 +27,8 @@ "/dns/mint14.rotko.net/tcp/34514/ws/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30508/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R", - "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30510/wss/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R" + "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30510/wss/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R", + "/dns/boot-polkadot-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWDR9M7CjV1xdjCRbRwkFn1E7sjMaL4oYxGyDWxuLrFc2J" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 90b70b05016d..6644ea41ab74 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -27,7 +27,8 @@ "/dns/kbr13.rotko.net/tcp/34553/ws/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", 
"/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30520/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV", - "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV" + "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV", + "/dns/boot-kusama-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWQybw6AFmAvrFfwUQnNxUpS12RovapD6oorh2mAJr4xyd" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index a9444b89e1e2..c51c5eff89b8 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -23,7 +23,8 @@ "/dns/pbr13.rotko.net/tcp/34543/ws/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30517/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", - "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30519/wss/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB" + "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30519/wss/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", + "/dns/boot-polkadot-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWKf3mBXHjLbwtPqv1BdbQuwbFNcQQYxASS7iQ25264AXH" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index 259669cf37a0..ce80e21ae625 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -27,7 +27,8 @@ "/dns/pch13.rotko.net/tcp/34573/ws/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30526/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", - "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff" + "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", + "/dns/boot-polkadot-collectives.luckyfriday.io/tcp/443/wss/p2p/12D3KooWCzifnPooTt4kvTnXT7FTKTymVL7xn7DURQLsS2AKpf6w" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/coretime-westend.json b/cumulus/parachains/chain-specs/coretime-westend.json index c79fd582348b..adb35b8a349f 100644 --- a/cumulus/parachains/chain-specs/coretime-westend.json +++ b/cumulus/parachains/chain-specs/coretime-westend.json @@ -4,7 +4,8 @@ "chainType": "Live", "bootNodes": [ "/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT", - "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH" + "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH", + "/dns/boot.metaspan.io/tcp/33019/p2p/12D3KooWCa1uNnEZqiqJY9jkKNQxwSLGPeZ5MjWHhjQMGwga9JMM" 
], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 7ef2eeb351ab..cfcf27ab68e0 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml index f4f8b3603ba6..98762beb0cb2 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } asset-hub-rococo-runtime = { path = "../../../../../../runtimes/assets/asset-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs index 2acccb9649b8..e5378b35f5e4 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs @@ -20,7 +20,7 @@ use sp_core::{sr25519, storage::Storage}; // Cumulus use emulated_integration_tests_common::{ accounts, build_genesis_storage, collators, get_account_id_from_seed, - PenpalSiblingSovereigAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, + PenpalSiblingSovereignAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, SAFE_XCM_VERSION, }; use parachains_common::{AccountId, Balance}; @@ -76,7 +76,7 @@ pub fn genesis() -> Storage { // Penpal's teleportable asset representation ( PenpalTeleportableAssetLocation::get(), - PenpalSiblingSovereigAccount::get(), + PenpalSiblingSovereignAccount::get(), true, ED, ), diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml index d4764f63bf64..a42a9abf618d 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { 
path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } asset-hub-westend-runtime = { path = "../../../../../../runtimes/assets/asset-hub-westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs index e30529aff42c..219d1306906c 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs @@ -20,7 +20,7 @@ use sp_core::{sr25519, storage::Storage}; // Cumulus use emulated_integration_tests_common::{ accounts, build_genesis_storage, collators, get_account_id_from_seed, - PenpalSiblingSovereigAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, + PenpalSiblingSovereignAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, SAFE_XCM_VERSION, }; use parachains_common::{AccountId, Balance}; @@ -72,7 +72,7 @@ pub fn genesis() -> Storage { // Penpal's teleportable asset representation ( PenpalTeleportableAssetLocation::get(), - PenpalSiblingSovereigAccount::get(), + PenpalSiblingSovereignAccount::get(), true, ED, ), diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml index 322d8b44e6ea..789f10a35f26 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } emulated-integration-tests-common = { path = "../../../../common", default-features = false } bridge-hub-rococo-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-rococo" } bridge-hub-common = { path = "../../../../../../runtimes/bridge-hubs/common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml index ec1386b7f6e2..d82971cf55ae 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = 
"../../../../../../common" } emulated-integration-tests-common = { path = "../../../../common", default-features = false } bridge-hub-westend-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-westend" } bridge-hub-common = { path = "../../../../../../runtimes/bridge-hubs/common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml index 03f755b666af..4c2a7d3c274d 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } collectives-westend-runtime = { path = "../../../../../../runtimes/collectives/collectives-westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml index 65a358d0ef2f..f7fe93d27775 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml @@ -14,7 +14,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } people-rococo-runtime = { path = "../../../../../../runtimes/people/people-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml index 075698848bcf..57a767e0c2a3 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml @@ -14,7 +14,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } 
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml index 075698848bcf..57a767e0c2a3 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml @@ -14,7 +14,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } people-westend-runtime = { path = "../../../../../../runtimes/people/people-westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml index f47350b00eb1..2ac508273c61 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml @@ -20,7 +20,7 @@ frame-support = { path = "../../../../../../../../substrate/frame/support", defa xcm = { package = "staging-xcm", path = "../../../../../../../../polkadot/xcm", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } penpal-runtime = { path = "../../../../../../runtimes/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs index 48901647fd05..d81ab8143ddb 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs @@ -31,8 +31,8 @@ pub const PARA_ID_B: u32 = 2001; pub const ED: Balance = penpal_runtime::EXISTENTIAL_DEPOSIT; parameter_types! { - pub PenpalSudoAcccount: AccountId = get_account_id_from_seed::<sr25519::Public>("Alice"); - pub PenpalAssetOwner: AccountId = PenpalSudoAcccount::get(); + pub PenpalSudoAccount: AccountId = get_account_id_from_seed::<sr25519::Public>("Alice"); + pub PenpalAssetOwner: AccountId = PenpalSudoAccount::get(); } pub fn genesis(para_id: u32) -> Storage { @@ -66,7 +66,7 @@ pub fn genesis(para_id: u32) -> Storage { safe_xcm_version: Some(SAFE_XCM_VERSION), ..Default::default() }, - sudo: penpal_runtime::SudoConfig { key: Some(PenpalSudoAcccount::get()) }, + sudo: penpal_runtime::SudoConfig { key: Some(PenpalSudoAccount::get()) }, assets: penpal_runtime::AssetsConfig { assets: vec![( penpal_runtime::xcm_config::TELEPORTABLE_ASSET_ID,
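
For context, `get_account_id_from_seed::<sr25519::Public>("Alice")` above derives the well-known dev account from the `//Alice` derivation path. A sketch of such a helper, mirroring the common Substrate test utility (assumed to match the version used by the emulated tests):

use sp_core::{sr25519, Pair, Public};
use sp_runtime::{traits::IdentifyAccount, AccountId32, MultiSigner};

// Derive an AccountId from a dev seed such as "Alice" (i.e. the "//Alice" path).
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId32
where
	MultiSigner: From<<TPublic::Pair as Pair>::Public>,
{
	let public = TPublic::Pair::from_string(&format!("//{}", seed), None)
		.expect("static values are valid; qed")
		.public();
	MultiSigner::from(public).into_account()
}

// e.g. let alice = get_account_id_from_seed::<sr25519::Public>("Alice");
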
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index 651b3a523067..0b49c7a3e091 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -14,7 +14,7 @@ // limitations under the License. mod genesis; -pub use genesis::{genesis, PenpalAssetOwner, PenpalSudoAcccount, ED, PARA_ID_A, PARA_ID_B}; +pub use genesis::{genesis, PenpalAssetOwner, PenpalSudoAccount, ED, PARA_ID_A, PARA_ID_B}; pub use penpal_runtime::xcm_config::{ CustomizableAssetFromSystemAssetHub, LocalTeleportableToAssetHub, XcmConfig, }; diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml index 2d27426cca75..7ac65b0ee1de 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml @@ -25,5 +25,5 @@ rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococ rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } emulated-integration-tests-common = { path = "../../../common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml index abc40c204068..12a3ad60e0e0 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml @@ -11,7 +11,6 @@ publish = false workspace = true [dependencies] - # Substrate sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } @@ -27,5 +26,5 @@ westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/west westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } emulated-integration-tests-common = { path = "../../../common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 5fc08dff32c4..618c3addc5d0 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -114,7 +114,7 @@ where .expect("Bridge message does not exist") .into(); let payload = Vec::<u8>::decode(&mut &encoded_payload[..]) - .expect("Decodign XCM message failed"); + .expect("Decoding XCM message failed"); let id: u32 = LaneIdWrapper(*lane).into(); let message = BridgeMessage { id, nonce, payload }; @@ -265,7 +265,7 @@ macro_rules! impl_assert_events_helpers_for_relay_chain { $crate::impls::assert_expected_events!( Self, vec![ - // XCM is successfully received and proccessed + // XCM is successfully received and processed [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { origin: $crate::impls::AggregateMessageOrigin::Ump($crate::impls::UmpQueueId::Para(id)), weight_used, @@ -343,7 +343,7 @@ macro_rules! impl_hrmp_channels_helpers_for_relay_chain { ::Runtime, >::contains_key(&channel_id); - // Check the HRMP channel has been successfully registrered + // Check the HRMP channel has been successfully registered assert!(hrmp_channel_exist) }); } @@ -362,7 +362,7 @@ macro_rules! 
impl_send_transact_helpers_for_relay_chain { recipient: $crate::impls::ParaId, call: $crate::impls::DoubleEncoded<()> ) { - use $crate::impls::{bx, Chain, RelayChain}; + use $crate::impls::{bx, Chain, RelayChain, Encode}; ::execute_with(|| { let root_origin = ::RuntimeOrigin::root(); @@ -370,10 +370,10 @@ macro_rules! impl_send_transact_helpers_for_relay_chain { let xcm = $crate::impls::xcm_transact_unpaid_execution(call, $crate::impls::OriginKind::Superuser); // Send XCM `Transact` - $crate::impls::assert_ok!(]>::XcmPallet::send( + $crate::impls::assert_ok!(]>::XcmPallet::send_blob( root_origin, bx!(destination.into()), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); Self::assert_xcm_pallet_sent(); }); diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 40204ca297a0..cbde0642f1a2 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -67,7 +67,7 @@ parameter_types! { xcm::v3::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into()), ] ); - pub PenpalSiblingSovereigAccount: AccountId = Sibling::from(PENPAL_ID).into_account_truncating(); + pub PenpalSiblingSovereignAccount: AccountId = Sibling::from(PENPAL_ID).into_account_truncating(); } /// Helper function to generate a crypto pair from seed diff --git a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs index 6951de6faa72..6f6bbe41e01b 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs @@ -115,7 +115,7 @@ macro_rules! test_parachain_is_trusted_teleporter { let para_receiver_balance_after = <$receiver_para as $crate::macros::Chain>::account_data_of(receiver.clone()).free; let delivery_fees = <$sender_para>::execute_with(|| { - $crate::macros::asset_test_utils::xcm_helpers::transfer_assets_delivery_fees::< + $crate::macros::asset_test_utils::xcm_helpers::teleport_assets_delivery_fees::< <$sender_xcm_config as xcm_executor::Config>::XcmSender, >($assets.clone(), fee_asset_item, weight_limit.clone(), beneficiary, para_destination) }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index 13eb7d8dfc49..9b519da4b1d8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -30,7 +30,7 @@ rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } # Cumulus asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } cumulus-pallet-parachain-system = { path = "../../../../../../pallets/parachain-system", default-features = false } testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["rococo"] } asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 21d858f1fe51..a5a4914e21d8 100644 --- 
a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -63,17 +63,13 @@ mod imports { // Runtimes pub use asset_hub_rococo_runtime::xcm_config::{ - TokenLocation as RelayLocation, UniversalLocation as AssetHubRococoUniversalLocation, - XcmConfig as AssetHubRococoXcmConfig, + TokenLocation as RelayLocation, XcmConfig as AssetHubRococoXcmConfig, }; pub use penpal_runtime::xcm_config::{ LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, - UniversalLocation as PenpalUniversalLocation, XcmConfig as PenpalRococoXcmConfig, - }; - pub use rococo_runtime::xcm_config::{ - UniversalLocation as RococoUniversalLocation, XcmConfig as RococoXcmConfig, }; + pub use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig; pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 705c9613b647..a0738839087a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -524,7 +524,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let destination = Rococo::child_location_of(PenpalA::para_id()); let sender = RococoSender::get(); let amount_to_send: Balance = ROCOCO_ED * 1000; - let assets: Assets = (Here, amount_to_send).into(); // Init values for Parachain let relay_native_asset_location = @@ -552,15 +551,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { test.set_dispatchable::(relay_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = Rococo::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &RococoUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -568,8 +558,8 @@ >::balance(relay_native_asset_location.into(), &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -595,7 +585,7 @@ fn reserve_transfer_native_asset_from_para_to_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for Relay @@ -634,15 +624,6 @@ fn reserve_transfer_native_asset_from_para_to_relay() { test.set_dispatchable::(para_to_relay_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = 
assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -650,8 +631,8 @@ fn reserve_transfer_native_asset_from_para_to_relay() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -705,16 +686,6 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = AssetHubRococo::execute_with(|| { - let reanchored_assets = assets - .reanchored(&destination, &AssetHubRococoUniversalLocation::get()) - .unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -722,8 +693,8 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { >::balance(system_para_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's assets is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's assets increased by `amount_to_send - delivery_fees - bought_execution`; @@ -738,7 +709,7 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { // Init values for Parachain let destination = PenpalA::sibling_location_of(AssetHubRococo::para_id()); let sender = PenpalASender::get(); - let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; + let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 10000; let assets: Assets = (Parent, amount_to_send).into(); let system_para_native_asset_location = v3::Location::try_from(RelayLocation::get()).expect("conversion works"); @@ -749,7 +720,7 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { ::RuntimeOrigin::signed(asset_owner), system_para_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for System Parachain @@ -788,15 +759,6 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { test.set_dispatchable::(para_to_system_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = 
::ForeignAssets; @@ -804,8 +766,8 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -1084,13 +1046,13 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // fund the Parachain Origin's SA on Relay Chain with the native tokens held in reserve Rococo::fund_accounts(vec![(sov_of_sender_on_relay.into(), amount_to_send * 2)]); - // Init values for Parachain Desitnation + // Init values for Parachain Destination let receiver = PenpalBReceiver::get(); // Init Test @@ -1118,13 +1080,6 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { test.set_dispatchable::(para_to_para_through_relay_limited_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -1135,8 +1090,8 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { >::balance(relay_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_assets_after > receiver_assets_before); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs index 364fbd0d439f..1d120f1dc4c7 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs @@ -75,10 +75,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( root_origin, bx!(system_para_destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); @@ -159,10 +159,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( root_origin, bx!(system_para_destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs index 87f0b3d9f90a..e13300b7c114 100644 --- 
a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -370,10 +370,10 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { penpal.clone(), ); - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( penpal_root, bx!(asset_hub_location), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs index 0cc5ddb9f64d..1cbb7fb8c193 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs @@ -245,16 +245,6 @@ fn relay_limited_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { ) } -fn relay_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { ::PolkadotXcm::limited_teleport_assets( t.signed_origin, @@ -266,16 +256,6 @@ fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResu ) } -fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn para_to_system_para_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { ::PolkadotXcm::transfer_assets( t.signed_origin, @@ -322,7 +302,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -369,7 +349,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -410,7 +390,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -421,129 +401,6 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { assert_eq!(receiver_balance_after, receiver_balance_before); } -/// Teleport of native asset from Relay Chain to the System Parachain should work -#[test] -fn teleport_native_assets_from_relay_to_system_para_works() { - // Init values for Relay Chain - let amount_to_send: Balance = ROCOCO_ED * 1000; - let dest = 
Rococo::child_location_of(AssetHubRococo::para_id()); - let beneficiary_id = AssetHubRococoReceiver::get(); - let test_args = TestContext { - sender: RococoSender::get(), - receiver: AssetHubRococoReceiver::get(), - args: TestArgs::new_relay(dest, beneficiary_id, amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(para_dest_assertions); - test.set_dispatchable::(relay_teleport_assets); - test.assert(); - - let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - -/// Teleport of native asset from System Parachains to the Relay Chain -/// should work when there is enough balance in Relay Chain's `CheckAccount` -#[test] -fn teleport_native_assets_back_from_system_para_to_relay_works() { - // Dependency - Relay Chain's `CheckAccount` should have enough balance - teleport_native_assets_from_relay_to_system_para_works(); - - // Init values for Relay Chain - let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; - let destination = AssetHubRococo::parent_location(); - let beneficiary_id = RococoReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: RococoReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions); - test.set_dispatchable::(system_para_teleport_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - -/// Teleport of native asset from System Parachain to Relay Chain -/// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount` -#[test] -fn teleport_native_assets_from_system_para_to_relay_fails() { - // Init values for Relay Chain - let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; - let destination = AssetHubRococo::parent_location(); - let beneficiary_id = RococoReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: RococoReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut 
test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions_fail); - test.set_dispatchable::(system_para_teleport_assets); - test.assert(); - - let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance does not change - assert_eq!(receiver_balance_after, receiver_balance_before); -} - #[test] fn teleport_to_other_system_parachains_works() { let amount = ASSET_HUB_ROCOCO_ED * 100; @@ -593,7 +450,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { ::RuntimeOrigin::signed(asset_owner.clone()), system_para_native_asset_location, sender.clone(), - fee_amount_to_send, + fee_amount_to_send * 2, ); // No need to create the asset (only mint) as it exists in genesis. PenpalA::mint_asset( diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml index 8ac8efb5218f..3121ed028eb9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml @@ -31,12 +31,12 @@ pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-fe westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["westend"] } penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } asset-hub-westend-runtime = { path = "../../../../../runtimes/assets/asset-hub-westend" } asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -cumulus-pallet-xcmp-queue = { default-features = false, path = "../../../../../../pallets/xcmp-queue" } -cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../../pallets/parachain-system" } +cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../../../../pallets/parachain-system", default-features = false } emulated-integration-tests-common = { path = "../../../common", default-features = false } westend-system-emulated-network = { path = "../../../networks/westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index 3f899d1dbdbc..c9f5fe0647e1 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -67,17 +67,13 @@ mod imports { // Runtimes pub use asset_hub_westend_runtime::xcm_config::{ - UniversalLocation as 
AssetHubWestendUniversalLocation, WestendLocation as RelayLocation, - XcmConfig as AssetHubWestendXcmConfig, + WestendLocation as RelayLocation, XcmConfig as AssetHubWestendXcmConfig, }; pub use penpal_runtime::xcm_config::{ LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, - UniversalLocation as PenpalUniversalLocation, XcmConfig as PenpalWestendXcmConfig, - }; - pub use westend_runtime::xcm_config::{ - UniversalLocation as WestendUniversalLocation, XcmConfig as WestendXcmConfig, }; + pub use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 8c836132b546..a26dfef8e8e7 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -215,7 +215,7 @@ fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { let system_para_native_asset_location = v3::Location::try_from(RelayLocation::get()).expect("conversion works"); let reservable_asset_location = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("coversion works"); + v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); PenpalA::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8799))); assert_expected_events!( PenpalA, @@ -246,7 +246,7 @@ fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { fn system_para_to_para_assets_receiver_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; let system_para_asset_location = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("coversion works"); + v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); PenpalA::assert_xcmp_queue_success(None); assert_expected_events!( PenpalA, @@ -524,7 +524,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let destination = Westend::child_location_of(PenpalA::para_id()); let sender = WestendSender::get(); let amount_to_send: Balance = WESTEND_ED * 1000; - let assets: Assets = (Here, amount_to_send).into(); // Init values fot Parachain let relay_native_asset_location = @@ -552,15 +551,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { test.set_dispatchable::(relay_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = Westend::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &WestendUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -568,8 +558,8 @@ fn reserve_transfer_native_asset_from_relay_to_para() { >::balance(relay_native_asset_location.into(), &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus 
delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -595,7 +585,7 @@ fn reserve_transfer_native_asset_from_para_to_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for Relay @@ -634,15 +624,6 @@ fn reserve_transfer_native_asset_from_para_to_relay() { test.set_dispatchable::(para_to_relay_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -650,8 +631,8 @@ fn reserve_transfer_native_asset_from_para_to_relay() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -705,16 +686,6 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = AssetHubWestend::execute_with(|| { - let reanchored_assets = assets - .reanchored(&destination, &AssetHubWestendUniversalLocation::get()) - .unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -722,8 +693,8 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { >::balance(system_para_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's assets is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's assets increased by `amount_to_send - delivery_fees - bought_execution`; @@ -749,7 +720,7 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { ::RuntimeOrigin::signed(asset_owner), system_para_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for System Parachain @@ -789,15 +760,6 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { test.set_dispatchable::(para_to_system_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - 
xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -805,8 +767,8 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -1086,13 +1048,13 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // fund the Parachain Origin's SA on Relay Chain with the native tokens held in reserve Westend::fund_accounts(vec![(sov_of_sender_on_relay.into(), amount_to_send * 2)]); - // Init values for Parachain Desitnation + // Init values for Parachain Destination let receiver = PenpalBReceiver::get(); // Init Test @@ -1120,13 +1082,6 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { test.set_dispatchable::(para_to_para_through_relay_limited_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -1137,8 +1092,8 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { >::balance(relay_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_assets_after > receiver_assets_before); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs index eb0e985cc0ce..f218b539c387 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs @@ -75,10 +75,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( root_origin, bx!(system_para_destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); @@ -159,10 +159,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( root_origin, bx!(system_para_destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); diff --git 
a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index 04740d311583..aa673c03483a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -369,10 +369,10 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { penpal.clone(), ); - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( penpal_root, bx!(asset_hub_location), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index 61f547fe7c5e..ac518d2ed4a4 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -245,16 +245,6 @@ fn relay_limited_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { ) } -fn relay_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { ::PolkadotXcm::limited_teleport_assets( t.signed_origin, @@ -266,16 +256,6 @@ fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResu ) } -fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn para_to_system_para_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { ::PolkadotXcm::transfer_assets( t.signed_origin, @@ -322,7 +302,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -369,7 +349,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -410,7 +390,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -421,129 +401,6 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { assert_eq!(receiver_balance_after, receiver_balance_before); } -/// Teleport of native asset from Relay Chain 
to the System Parachain should work -#[test] -fn teleport_native_assets_from_relay_to_system_para_works() { - // Init values for Relay Chain - let amount_to_send: Balance = WESTEND_ED * 1000; - let dest = Westend::child_location_of(AssetHubWestend::para_id()); - let beneficiary_id = AssetHubWestendReceiver::get(); - let test_args = TestContext { - sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: TestArgs::new_relay(dest, beneficiary_id, amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(para_dest_assertions); - test.set_dispatchable::(relay_teleport_assets); - test.assert(); - - let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - -/// Teleport of native asset from System Parachains to the Relay Chain -/// should work when there is enough balance in Relay Chain's `CheckAccount` -#[test] -fn teleport_native_assets_back_from_system_para_to_relay_works() { - // Dependency - Relay Chain's `CheckAccount` should have enough balance - teleport_native_assets_from_relay_to_system_para_works(); - - // Init values for Relay Chain - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let destination = AssetHubWestend::parent_location(); - let beneficiary_id = WestendReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions); - test.set_dispatchable::(system_para_teleport_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - -/// Teleport of native asset from System Parachain to Relay Chain -/// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount` -#[test] -fn teleport_native_assets_from_system_para_to_relay_fails() { - // Init values for Relay Chain - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let destination = AssetHubWestend::parent_location(); - let beneficiary_id = WestendReceiver::get(); - let assets = (Parent, 
amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions_fail); - test.set_dispatchable::(system_para_teleport_assets); - test.assert(); - - let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance does not change - assert_eq!(receiver_balance_after, receiver_balance_before); -} - #[test] fn teleport_to_other_system_parachains_works() { let amount = ASSET_HUB_WESTEND_ED * 100; @@ -593,7 +450,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { ::RuntimeOrigin::signed(asset_owner.clone()), system_para_native_asset_location, sender.clone(), - fee_amount_to_send, + fee_amount_to_send * 2, ); // No need to create the asset (only mint) as it exists in genesis. PenpalA::mint_asset( diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index 89f0d2a9ca6d..010c252658c0 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } hex-literal = "0.4.1" # Substrate @@ -34,10 +34,10 @@ pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["rococo"] } cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } -bridge-hub-rococo-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } +bridge-hub-rococo-runtime = { path = "../../../../../runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } emulated-integration-tests-common = { path = "../../../common", default-features = false } rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } rococo-system-emulated-network = { path = "../../../networks/rococo-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs 
b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index a1d871cdb618..4bd041dc03f4 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -14,6 +14,7 @@ // limitations under the License. use crate::tests::*; +use codec::Encode; #[test] fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable() { @@ -26,7 +27,7 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable let remote_xcm = Xcm(vec![ClearOrigin]); - let xcm = VersionedXcm::from(Xcm(vec![ + let xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { network: WestendId.into(), @@ -38,10 +39,10 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin, bx!(destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index 1e74d63e1d52..780ba57f78a1 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -20,17 +20,17 @@ use frame_support::pallet_prelude::TypeInfo; use hex_literal::hex; use rococo_system_emulated_network::penpal_emulated_chain::CustomizableAssetFromSystemAssetHub; use rococo_westend_system_emulated_network::BridgeHubRococoParaSender as BridgeHubRococoSender; -use snowbridge_core::outbound::OperatingMode; +use snowbridge_core::{inbound::InboundQueueFixture, outbound::OperatingMode}; use snowbridge_pallet_inbound_queue_fixtures::{ - register_token::make_register_token_message, - register_token_with_insufficient_fee::make_register_token_with_infufficient_fee_message, - send_token::make_send_token_message, send_token_to_penpal::make_send_token_to_penpal_message, - InboundQueueFixture, + register_token::make_register_token_message, send_token::make_send_token_message, + send_token_to_penpal::make_send_token_to_penpal_message, }; use snowbridge_pallet_system; -use snowbridge_router_primitives::inbound::GlobalConsensusEthereumConvertsFor; +use snowbridge_router_primitives::inbound::{ + Command, GlobalConsensusEthereumConvertsFor, MessageV1, VersionedMessage, +}; use sp_core::H256; -use sp_runtime::{ArithmeticError::Underflow, DispatchError::Arithmetic}; +use sp_runtime::{DispatchError::Token, TokenError::FundsUnavailable}; use testnet_parachains_constants::rococo::snowbridge::EthereumNetwork; const INITIAL_FUND: u128 = 5_000_000_000 * ROCOCO_ED; @@ -39,6 +39,7 @@ const TREASURY_ACCOUNT: [u8; 32] = hex!("6d6f646c70792f74727372790000000000000000000000000000000000000000"); const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); +const INSUFFICIENT_XCM_FEE: u128 = 1000; #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] pub enum ControlCall { @@ 
-56,13 +57,11 @@ pub enum SnowbridgeControl { } pub fn send_inbound_message(fixture: InboundQueueFixture) -> DispatchResult { - EthereumBeaconClient::store_execution_header( - fixture.message.proof.block_hash, - fixture.execution_header, - 0, - H256::default(), - ); - + EthereumBeaconClient::store_finalized_header( + fixture.finalized_header, + fixture.block_roots_root, + ) + .unwrap(); EthereumInboundQueue::submit( RuntimeOrigin::signed(BridgeHubRococoSender::get()), fixture.message, @@ -83,7 +82,7 @@ fn create_agent() { let create_agent_call = SnowbridgeControl::Control(ControlCall::CreateAgent {}); // Construct XCM to create an agent for para 1001 - let remote_xcm = VersionedXcm::from(Xcm(vec![ + let remote_xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -96,10 +95,10 @@ fn create_agent() { // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin, bx!(destination), - bx!(remote_xcm), + remote_xcm.encode().try_into().unwrap(), )); type RuntimeEvent = ::RuntimeEvent; @@ -141,7 +140,7 @@ fn create_channel() { let create_agent_call = SnowbridgeControl::Control(ControlCall::CreateAgent {}); // Construct XCM to create an agent for para 1001 - let create_agent_xcm = VersionedXcm::from(Xcm(vec![ + let create_agent_xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -154,7 +153,7 @@ fn create_channel() { let create_channel_call = SnowbridgeControl::Control(ControlCall::CreateChannel { mode: OperatingMode::Normal }); // Construct XCM to create a channel for para 1001 - let create_channel_xcm = VersionedXcm::from(Xcm(vec![ + let create_channel_xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -167,16 +166,16 @@ fn create_channel() { // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin.clone(), bx!(destination.clone()), - bx!(create_agent_xcm), + create_agent_xcm.encode().try_into().unwrap(), )); - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin, bx!(destination), - bx!(create_channel_xcm), + create_channel_xcm.encode().try_into().unwrap(), )); type RuntimeEvent = ::RuntimeEvent; @@ -237,6 +236,46 @@ fn register_weth_token_from_ethereum_to_asset_hub() { }); } +/// Tests the registering of a token as an asset on AssetHub, and then subsequently sending +/// a token from Ethereum to AssetHub. 
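The `send` → `send_blob` migration that runs through these tests is one mechanical pattern: SCALE-encode the `VersionedXcm` and convert the bytes into the bounded blob the new extrinsic takes, which is what every `xcm.encode().try_into().unwrap()` call site above does. A minimal sketch of that conversion in isolation, assuming `codec`, `frame-support`, and `sp-core` as dependencies and `MAX_XCM_ENCODED_SIZE` as a hypothetical stand-in for the pallet's real bound; the relocated `send_token_from_ethereum_to_asset_hub` test follows below.

use codec::Encode;
use frame_support::BoundedVec;
use sp_core::ConstU32;

// Hypothetical bound; the actual limit is defined by pallet-xcm's blob type.
const MAX_XCM_ENCODED_SIZE: u32 = 1024;

// Encode any SCALE-encodable XCM value and bound the resulting bytes.
// `try_into` hands the raw bytes back as the error when the encoding
// exceeds the bound, which is what the `.unwrap()` calls above rely on.
fn encode_xcm_blob(
    xcm: &impl Encode,
) -> Result<BoundedVec<u8, ConstU32<MAX_XCM_ENCODED_SIZE>>, Vec<u8>> {
    xcm.encode().try_into()
}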
+#[test] +fn send_token_from_ethereum_to_asset_hub() { + BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), INITIAL_FUND); + + // Fund ethereum sovereign on AssetHub + AssetHubRococo::fund_accounts(vec![(AssetHubRococoReceiver::get(), INITIAL_FUND)]); + + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + // Construct RegisterToken message and send it to the inbound queue + send_inbound_message(make_register_token_message()).unwrap(); + + // Construct SendToken message and send it to the inbound queue + send_inbound_message(make_send_token_message()).unwrap(); + + // Check that the message was sent + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, + ] + ); + }); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + // Check that the token was received and issued as a foreign asset on AssetHub + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {}, + ] + ); + }); +} + /// Tests sending a token to a 3rd party parachain, called PenPal. The token reserve is /// still located on AssetHub. #[test] @@ -296,6 +335,10 @@ fn send_token_from_ethereum_to_penpal() { // Construct RegisterToken message and sent to inbound queue send_inbound_message(make_register_token_message()).unwrap(); + // Construct SendToken message to AssetHub (only to bump the nonce, preserving the same order + // as in the smoke test) + send_inbound_message(make_send_token_message()).unwrap(); + // Construct SendToken message and sent to inbound queue send_inbound_message(make_send_token_to_penpal_message()).unwrap(); @@ -331,46 +374,6 @@ fn send_token_from_ethereum_to_penpal() { }); } -/// Tests the registering of a token as an asset on AssetHub, and then subsequently sending -/// a token from Ethereum to AssetHub. -#[test] -fn send_token_from_ethereum_to_asset_hub() { - BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), INITIAL_FUND); - - // Fund ethereum sovereign on AssetHub - AssetHubRococo::fund_accounts(vec![(AssetHubRococoReceiver::get(), INITIAL_FUND)]); - - BridgeHubRococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - // Construct RegisterToken message and sent to inbound queue - send_inbound_message(make_register_token_message()).unwrap(); - - // Construct SendToken message and sent to inbound queue - send_inbound_message(make_send_token_message()).unwrap(); - - // Check that the message was sent - assert_expected_events!( - BridgeHubRococo, - vec![ - RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, - ] - ); - }); - - AssetHubRococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - // Check that the token was received and issued as a foreign asset on AssetHub - assert_expected_events!( - AssetHubRococo, - vec![ - RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { ..
}) => {}, - ] - ); - }); -} - /// Tests the full cycle of token transfers: /// - registering a token on AssetHub /// - sending a token to AssetHub @@ -458,12 +461,13 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { AssetHubRococoReceiver::get(), ); // Send the Weth back to Ethereum - ::PolkadotXcm::reserve_transfer_assets( + ::PolkadotXcm::limited_reserve_transfer_assets( RuntimeOrigin::signed(AssetHubRococoReceiver::get()), Box::new(destination), Box::new(beneficiary), Box::new(multi_assets), 0, + Unlimited, ) .unwrap(); let free_balance_after = ::Balances::free_balance( @@ -506,16 +510,35 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { }); } +#[test] +fn send_token_from_ethereum_to_asset_hub_fail_for_insufficient_fund() { + // Insufficient fund + BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), 1_000); + + BridgeHubRococo::execute_with(|| { + assert_err!(send_inbound_message(make_register_token_message()), Token(FundsUnavailable)); + }); +} + #[test] fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() { BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), INITIAL_FUND); BridgeHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; - - // Construct RegisterToken message and sent to inbound queue - let message = make_register_token_with_infufficient_fee_message(); - send_inbound_message(message).unwrap(); + type EthereumInboundQueue = + ::EthereumInboundQueue; + let message_id: H256 = [0; 32].into(); + let message = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::RegisterToken { + token: WETH.into(), + // Insufficient fee which should trigger the trap + fee: INSUFFICIENT_XCM_FEE, + }, + }); + let (xcm, _) = EthereumInboundQueue::do_convert(message_id, message).unwrap(); + let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubRococo::para_id().into()).unwrap(); assert_expected_events!( BridgeHubRococo, @@ -536,13 +559,3 @@ fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() { ); }); } - -#[test] -fn send_token_from_ethereum_to_asset_hub_fail_for_insufficient_fund() { - // Insufficient fund - BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), 1_000); - - BridgeHubRococo::execute_with(|| { - assert_err!(send_inbound_message(make_register_token_message()), Arithmetic(Underflow)); - }); -} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index 9d55903c8583..9c45a7adeb4e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -11,6 +11,7 @@ publish = false workspace = true [dependencies] +codec = { package = "parity-scale-codec", version = "3.6.0" } # Substrate frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } @@ -30,8 +31,8 @@ pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } -bridge-hub-westend-runtime = { path = 
"../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-westend", default-features = false } +bridge-hub-westend-runtime = { path = "../../../../../runtimes/bridge-hubs/bridge-hub-westend", default-features = false } emulated-integration-tests-common = { path = "../../../common", default-features = false } rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs index b01be5e8dc84..f69747c17704 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -14,6 +14,7 @@ // limitations under the License. use crate::tests::*; +use codec::Encode; #[test] fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable() { @@ -26,7 +27,7 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable let remote_xcm = Xcm(vec![ClearOrigin]); - let xcm = VersionedXcm::from(Xcm(vec![ + let xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { network: RococoId, @@ -38,10 +39,10 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable // Westend Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Westend::execute_with(|| { - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin, bx!(destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml index 609376c1fee6..1570aa7662fc 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml @@ -26,7 +26,7 @@ polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common # Cumulus asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } people-rococo-runtime = { path = "../../../../../runtimes/people/people-rococo" } emulated-integration-tests-common = { path = "../../../common", default-features = false } rococo-system-emulated-network = { path = "../../../networks/rococo-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs index 3abe5c6cf66a..350d87d638ab 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs @@ -155,7 +155,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) 
}); @@ -204,7 +204,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -248,7 +248,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml index f2f3366798a0..bc093dc0de63 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml @@ -26,7 +26,7 @@ polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common # Cumulus asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } people-westend-runtime = { path = "../../../../../runtimes/people/people-westend" } emulated-integration-tests-common = { path = "../../../common", default-features = false } westend-system-emulated-network = { path = "../../../networks/westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs index eef35a99a833..8697477ba769 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs @@ -155,7 +155,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -204,7 +204,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -248,7 +248,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index 
b0a1f98da8c9..93fb43de3d2f 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -11,9 +11,9 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", optional = true, default-features = false } +frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system", default-features = false } diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index 451b7734daee..8c064962c02c 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system", default-features = false } diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 83d059fe8829..a52849624c07 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } frame-support = { path = "../../../../substrate/frame/support", default-features = false } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 535e6613a0f6..b8a5e72d92ec 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -13,7 +13,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -77,6 +77,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path 
= "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -85,10 +86,10 @@ assets-common = { path = "../common", default-features = false } # Bridges pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } -bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } +bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } +bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } snowbridge-router-primitives = { path = "../../../../../bridges/snowbridge/primitives/router", default-features = false } [dev-dependencies] @@ -189,6 +190,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 96591fd9f435..d0847db577f4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -116,7 +116,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, @@ -129,7 +129,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, @@ -663,6 +663,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl parachain_info::Config for Runtime {} @@ -957,6 +958,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, 
pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs index 51b6543bae82..e0e231d7da27 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,30 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 22_136_000 picoseconds. - Weight::from_parts(22_518_000, 0) + // Minimum execution time: 21_224_000 picoseconds. + Weight::from_parts(21_821_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 21_474_000 picoseconds. + Weight::from_parts(22_072_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +112,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 92_277_000 picoseconds. - Weight::from_parts(94_843_000, 0) + // Minimum execution time: 90_677_000 picoseconds. 
+ Weight::from_parts(93_658_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -118,8 +140,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `400` // Estimated: `6196` - // Minimum execution time: 120_110_000 picoseconds. - Weight::from_parts(122_968_000, 0) + // Minimum execution time: 116_767_000 picoseconds. + Weight::from_parts(118_843_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) @@ -148,8 +170,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `496` // Estimated: `6208` - // Minimum execution time: 143_116_000 picoseconds. - Weight::from_parts(147_355_000, 0) + // Minimum execution time: 137_983_000 picoseconds. + Weight::from_parts(141_396_000, 0) .saturating_add(Weight::from_parts(0, 6208)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -164,14 +186,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_517_000 picoseconds. - Weight::from_parts(6_756_000, 0) + // Minimum execution time: 6_232_000 picoseconds. + Weight::from_parts(6_507_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -181,8 +213,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_894_000 picoseconds. - Weight::from_parts(2_024_000, 0) + // Minimum execution time: 1_884_000 picoseconds. + Weight::from_parts(2_016_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -208,8 +240,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 27_314_000 picoseconds. - Weight::from_parts(28_787_000, 0) + // Minimum execution time: 26_637_000 picoseconds. + Weight::from_parts(27_616_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -234,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 29_840_000 picoseconds. - Weight::from_parts(30_589_000, 0) + // Minimum execution time: 28_668_000 picoseconds. 
+ Weight::from_parts(29_413_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -246,8 +278,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_893_000 picoseconds. - Weight::from_parts(2_017_000, 0) + // Minimum execution time: 1_990_000 picoseconds. + Weight::from_parts(2_114_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -257,8 +289,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `159` // Estimated: `13524` - // Minimum execution time: 19_211_000 picoseconds. - Weight::from_parts(19_552_000, 0) + // Minimum execution time: 18_856_000 picoseconds. + Weight::from_parts(19_430_000, 0) .saturating_add(Weight::from_parts(0, 13524)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -269,8 +301,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `163` // Estimated: `13528` - // Minimum execution time: 19_177_000 picoseconds. - Weight::from_parts(19_704_000, 0) + // Minimum execution time: 19_068_000 picoseconds. + Weight::from_parts(19_434_000, 0) .saturating_add(Weight::from_parts(0, 13528)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -281,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `16013` - // Minimum execution time: 20_449_000 picoseconds. - Weight::from_parts(21_075_000, 0) + // Minimum execution time: 21_055_000 picoseconds. + Weight::from_parts(21_379_000, 0) .saturating_add(Weight::from_parts(0, 16013)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -304,8 +336,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 26_578_000 picoseconds. - Weight::from_parts(27_545_000, 0) + // Minimum execution time: 25_736_000 picoseconds. + Weight::from_parts(26_423_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -316,8 +348,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `11096` - // Minimum execution time: 11_646_000 picoseconds. - Weight::from_parts(11_944_000, 0) + // Minimum execution time: 11_853_000 picoseconds. + Weight::from_parts(12_215_000, 0) .saturating_add(Weight::from_parts(0, 11096)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -327,8 +359,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `170` // Estimated: `13535` - // Minimum execution time: 19_301_000 picoseconds. - Weight::from_parts(19_664_000, 0) + // Minimum execution time: 19_418_000 picoseconds. + Weight::from_parts(19_794_000, 0) .saturating_add(Weight::from_parts(0, 13535)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -351,8 +383,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `13577` - // Minimum execution time: 35_715_000 picoseconds. - Weight::from_parts(36_915_000, 0) + // Minimum execution time: 34_719_000 picoseconds. 
+ Weight::from_parts(35_260_000, 0) .saturating_add(Weight::from_parts(0, 13577)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -365,8 +397,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_871_000 picoseconds. - Weight::from_parts(5_066_000, 0) + // Minimum execution time: 4_937_000 picoseconds. + Weight::from_parts(5_203_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -377,8 +409,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 25_150_000 picoseconds. - Weight::from_parts(26_119_000, 0) + // Minimum execution time: 26_064_000 picoseconds. + Weight::from_parts(26_497_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -389,8 +421,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 38_248_000 picoseconds. - Weight::from_parts(39_122_000, 0) + // Minimum execution time: 37_132_000 picoseconds. + Weight::from_parts(37_868_000, 0) .saturating_add(Weight::from_parts(0, 3625)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 2e081711e091..c3844c71dd72 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -192,7 +192,7 @@ pub type ForeignFungiblesTransactor = FungiblesAdapter< LocationToAccountId, // Our chain's account ID type (we can't get away without mentioning it explicitly): AccountId, - // We dont need to check teleports here. + // We don't need to check teleports here. NoChecking, // The account to use for tracking teleports. CheckingAccount, @@ -286,8 +286,8 @@ impl Contains for SafeCallFilter { match call { RuntimeCall::System(frame_system::Call::set_storage { items }) if items.iter().all(|(k, _)| { - k.eq(&bridging::XcmBridgeHubRouterByteFee::key()) | - k.eq(&bridging::XcmBridgeHubRouterBaseFee::key()) | + k.eq(&bridging::XcmBridgeHubRouterByteFee::key()) || + k.eq(&bridging::XcmBridgeHubRouterBaseFee::key()) || k.eq(&bridging::to_ethereum::BridgeHubEthereumBaseFee::key()) }) => return true, @@ -628,6 +628,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = SafeCallFilter; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } /// Converts a local signed origin into an XCM location. 
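A note on the `SignedExtra` hunks above: asset-hub-rococo (and the other runtimes further down) append `StorageWeightReclaim` as the final signed extension, and the extraction here has dropped the `<Runtime>` type parameters from the tuple. A minimal sketch of the reassembled type follows, assuming the conventional leading `frame_system` checks, which sit outside the visible hunk:

// Sketch only: generic parameters restored by hand, leading checks assumed.
pub type SignedExtra = (
    // ... the runtime's usual frame_system checks (CheckNonZeroSender, etc.) ...
    frame_system::CheckNonce<Runtime>,
    frame_system::CheckWeight<Runtime>,
    pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
    // New: after execution, compares the benchmarked proof size with the
    // proof size actually recorded and refunds the unused PoV weight.
    cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
);

Appending it at the end means it runs after `CheckWeight` has done its pre-dispatch accounting, which appears to be what the refund is measured against.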
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index de2f458b84d7..bce247b57525 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -13,7 +13,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -75,6 +75,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -83,10 +84,10 @@ assets-common = { path = "../common", default-features = false } # Bridges pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } -bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } +bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } +bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } [dev-dependencies] hex-literal = "0.4.1" @@ -177,6 +178,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index a7518b62509a..69dd2ccae7bf 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -113,7 +113,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion 
{ spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, @@ -644,6 +644,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} @@ -929,6 +930,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs index 71facff87d35..a36c25f96043 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-f3xfxtob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,30 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 21_630_000 picoseconds. - Weight::from_parts(22_306_000, 0) + // Minimum execution time: 21_050_000 picoseconds. + Weight::from_parts(21_834_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 21_164_000 picoseconds. 
+ Weight::from_parts(21_656_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +112,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 91_802_000 picoseconds. - Weight::from_parts(93_672_000, 0) + // Minimum execution time: 92_497_000 picoseconds. + Weight::from_parts(95_473_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -118,8 +140,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `367` // Estimated: `6196` - // Minimum execution time: 118_930_000 picoseconds. - Weight::from_parts(122_306_000, 0) + // Minimum execution time: 120_059_000 picoseconds. + Weight::from_parts(122_894_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) @@ -148,8 +170,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `496` // Estimated: `6208` - // Minimum execution time: 140_527_000 picoseconds. - Weight::from_parts(144_501_000, 0) + // Minimum execution time: 141_977_000 picoseconds. + Weight::from_parts(145_981_000, 0) .saturating_add(Weight::from_parts(0, 6208)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -158,8 +180,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_556_000 picoseconds. - Weight::from_parts(7_798_000, 0) + // Minimum execution time: 7_426_000 picoseconds. + Weight::from_parts(7_791_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_585_000 picoseconds. + Weight::from_parts(7_897_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) @@ -168,8 +198,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_373_000 picoseconds. - Weight::from_parts(6_603_000, 0) + // Minimum execution time: 6_224_000 picoseconds. + Weight::from_parts(6_793_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +209,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_941_000 picoseconds. - Weight::from_parts(2_088_000, 0) + // Minimum execution time: 1_812_000 picoseconds. + Weight::from_parts(2_008_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -206,8 +236,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 27_080_000 picoseconds. - Weight::from_parts(27_820_000, 0) + // Minimum execution time: 26_586_000 picoseconds. 
+ Weight::from_parts(27_181_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -232,8 +262,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 28_850_000 picoseconds. - Weight::from_parts(29_506_000, 0) + // Minimum execution time: 28_295_000 picoseconds. + Weight::from_parts(29_280_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -244,8 +274,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_033_000 picoseconds. - Weight::from_parts(2_201_000, 0) + // Minimum execution time: 1_803_000 picoseconds. + Weight::from_parts(1_876_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -255,8 +285,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `159` // Estimated: `13524` - // Minimum execution time: 18_844_000 picoseconds. - Weight::from_parts(19_197_000, 0) + // Minimum execution time: 18_946_000 picoseconds. + Weight::from_parts(19_456_000, 0) .saturating_add(Weight::from_parts(0, 13524)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -267,8 +297,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `163` // Estimated: `13528` - // Minimum execution time: 18_940_000 picoseconds. - Weight::from_parts(19_450_000, 0) + // Minimum execution time: 19_080_000 picoseconds. + Weight::from_parts(19_498_000, 0) .saturating_add(Weight::from_parts(0, 13528)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -279,8 +309,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `16013` - // Minimum execution time: 20_521_000 picoseconds. - Weight::from_parts(21_076_000, 0) + // Minimum execution time: 20_637_000 picoseconds. + Weight::from_parts(21_388_000, 0) .saturating_add(Weight::from_parts(0, 16013)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -302,8 +332,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 26_007_000 picoseconds. - Weight::from_parts(26_448_000, 0) + // Minimum execution time: 25_701_000 picoseconds. + Weight::from_parts(26_269_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -314,8 +344,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `11096` - // Minimum execution time: 11_584_000 picoseconds. - Weight::from_parts(12_080_000, 0) + // Minimum execution time: 11_949_000 picoseconds. + Weight::from_parts(12_249_000, 0) .saturating_add(Weight::from_parts(0, 11096)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -325,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `170` // Estimated: `13535` - // Minimum execution time: 19_157_000 picoseconds. - Weight::from_parts(19_513_000, 0) + // Minimum execution time: 19_278_000 picoseconds. 
+ Weight::from_parts(19_538_000, 0) .saturating_add(Weight::from_parts(0, 13535)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -349,8 +379,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `13577` - // Minimum execution time: 34_878_000 picoseconds. - Weight::from_parts(35_623_000, 0) + // Minimum execution time: 35_098_000 picoseconds. + Weight::from_parts(35_871_000, 0) .saturating_add(Weight::from_parts(0, 13577)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -363,8 +393,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 3_900_000 picoseconds. - Weight::from_parts(4_161_000, 0) + // Minimum execution time: 3_862_000 picoseconds. + Weight::from_parts(4_082_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -375,8 +405,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 25_731_000 picoseconds. - Weight::from_parts(26_160_000, 0) + // Minimum execution time: 25_423_000 picoseconds. + Weight::from_parts(25_872_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -387,8 +417,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 37_251_000 picoseconds. - Weight::from_parts(38_075_000, 0) + // Minimum execution time: 37_148_000 picoseconds. + Weight::from_parts(37_709_000, 0) .saturating_add(Weight::from_parts(0, 3625)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index 58707a7c140a..835acbf01980 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -185,7 +185,7 @@ pub type ForeignFungiblesTransactor = FungiblesAdapter< LocationToAccountId, // Our chain's account ID type (we can't get away without mentioning it explicitly): AccountId, - // We dont need to check teleports here. + // We don't need to check teleports here. NoChecking, // The account to use for tracking teleports. CheckingAccount, @@ -649,6 +649,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = SafeCallFilter; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } /// Local origins on this chain are allowed to dispatch XCM sends/executions. 
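Both Asset Hub message-queue hunks above wire the new `IdleMaxServiceWeight` associated type to the same `MessageQueueServiceWeight` parameter that already feeds `ServiceWeight`. `ServiceWeight` bounds queue servicing in `on_initialize`; `IdleMaxServiceWeight` additionally lets the pallet keep draining messages from leftover `on_idle` weight, and reusing one parameter is the simplest wiring. A hedged sketch of that configuration fragment (the `Perbill` fraction is illustrative, not taken from this diff):

parameter_types! {
    // Illustrative bound; each runtime defines its own fraction of the block.
    pub MessageQueueServiceWeight: Weight =
        Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block;
}

impl pallet_message_queue::Config for Runtime {
    // ... other associated types as in the hunks above ...
    type ServiceWeight = MessageQueueServiceWeight;
    // New in this diff: also service the queue from unused on_idle weight.
    type IdleMaxServiceWeight = MessageQueueServiceWeight;
}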
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index 05ef3f27089e..64649cd9919b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -620,7 +620,7 @@ fn test_asset_xcm_take_first_trader_with_refund() { // We actually use half of the weight let weight_used = bought / 2; - // Make sure refurnd works. + // Make sure refund works. let amount_refunded = WeightToFee::weight_to_fee(&(bought - weight_used)); assert_eq!( @@ -747,7 +747,7 @@ fn test_that_buying_ed_refund_does_not_refund_for_take_first_trader() { // Buy weight should work assert_ok!(trader.buy_weight(bought, asset.into(), &ctx)); - // Should return None. We have a specific check making sure we dont go below ED for + // Should return None. We have a specific check making sure we don't go below ED for // drop payment assert_eq!(trader.refund_weight(bought, &ctx), None); diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index 5b365ebd0d91..340522e79c32 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } impl-trait-for-tuples = "0.2.2" diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 53e10956bd0d..884b71369e79 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -186,7 +186,7 @@ pub fn teleports_for_native_asset_works< // Mint funds into account to ensure it has enough balance to pay delivery fees let delivery_fees = - xcm_helpers::transfer_assets_delivery_fees::( + xcm_helpers::teleport_assets_delivery_fees::( (native_asset_id.clone(), native_asset_to_teleport_away.into()).into(), 0, Unlimited, @@ -579,7 +579,7 @@ pub fn teleports_for_foreign_assets_works< // Make sure the target account has enough native asset to pay for delivery fees let delivery_fees = - xcm_helpers::transfer_assets_delivery_fees::( + xcm_helpers::teleport_assets_delivery_fees::( (foreign_asset_id_location_latest.clone(), asset_to_teleport_away).into(), 0, Unlimited, @@ -1120,7 +1120,7 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor AssetId: Clone, AssetIdConverter: MaybeEquivalence, { - // foreign parachain with the same consenus currency as asset + // foreign parachain with the same consensus currency as asset let foreign_asset_id_location = Location::new(1, [Parachain(2222), GeneralIndex(1234567)]); let asset_id = AssetIdConverter::convert(&foreign_asset_id_location).unwrap(); diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index 2c35cb6aa35e..9968aee00663 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ 
b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -387,7 +387,7 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< existential_deposit, ); - // create foreign asset for wrapped/derivated representation + // create foreign asset for wrapped/derived representation assert_ok!( >::force_create( RuntimeHelper::::root_origin(), diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs b/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs index f509a3a8acaa..ca0e81fae42e 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs @@ -18,11 +18,10 @@ use xcm::latest::prelude::*; -/// Returns the delivery fees amount for pallet xcm's `teleport_assets` and -/// `reserve_transfer_assets` extrinsics. +/// Returns the delivery fees amount for pallet xcm's `teleport_assets` extrinsics. /// Because it returns only a `u128`, it assumes delivery fees are only paid /// in one asset and that asset is known. -pub fn transfer_assets_delivery_fees( +pub fn teleport_assets_delivery_fees( assets: Assets, fee_asset_item: u32, weight_limit: WeightLimit, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 8633159fedd5..1dfe93fa5ab4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -16,7 +16,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } @@ -73,6 +73,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f ] } cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } @@ -80,20 +81,20 @@ parachains-common = { path = "../../../common", default-features = false } testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } # Bridges -bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-polkadot = { path = "../../../../../bridges/primitives/chain-bridge-hub-polkadot", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } 
-bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } +bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } +bp-bridge-hub-polkadot = { path = "../../../../../bridges/chains/chain-bridge-hub-polkadot", default-features = false } +bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } -bp-polkadot-bulletin = { path = "../../../../../bridges/primitives/chain-polkadot-bulletin", default-features = false } +bp-polkadot-bulletin = { path = "../../../../../bridges/chains/chain-polkadot-bulletin", default-features = false } bp-polkadot-core = { path = "../../../../../bridges/primitives/polkadot-core", default-features = false } bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false } bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } -bp-rococo = { path = "../../../../../bridges/primitives/chain-rococo", default-features = false } -bp-westend = { path = "../../../../../bridges/primitives/chain-westend", default-features = false } +bp-rococo = { path = "../../../../../bridges/chains/chain-rococo", default-features = false } +bp-westend = { path = "../../../../../bridges/chains/chain-westend", default-features = false } pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } @@ -149,6 +150,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 6a6445b27d50..83c511fc5c5c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -42,7 +42,9 @@ use alloc::{boxed::Box, vec::Vec}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use snowbridge_beacon_primitives::{Fork, ForkVersions}; use snowbridge_core::{ - gwei, meth, outbound::Message, AgentId, AllowSiblingsOnly, PricingParameters, Rewards, + gwei, meth, + outbound::{Command, Fee}, + AgentId, AllowSiblingsOnly, PricingParameters, Rewards, }; use snowbridge_router_primitives::inbound::MessageToXcm; use sp_api::impl_runtime_apis; @@ -101,8 +103,6 @@ use parachains_common::{ AVERAGE_ON_INITIALIZE_RATIO, NORMAL_DISPATCH_RATIO, }; -use polkadot_runtime_common::prod_or_fast; - #[cfg(feature = "runtime-benchmarks")] use 
benchmark_helpers::DoNothingRouter; @@ -133,6 +133,7 @@ pub type SignedExtra = ( bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages, bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages, ), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. @@ -205,7 +206,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 4, @@ -388,6 +389,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} @@ -506,7 +508,8 @@ parameter_types! { pub Parameters: PricingParameters = PricingParameters { exchange_rate: FixedU128::from_rational(1, 400), fee_per_gas: gwei(20), - rewards: Rewards { local: 1 * UNITS, remote: meth(1) } + rewards: Rewards { local: 1 * UNITS, remote: meth(1) }, + multiplier: FixedU128::from_rational(1, 1), }; } @@ -514,14 +517,14 @@ parameter_types! { pub mod benchmark_helpers { use crate::{EthereumBeaconClient, Runtime, RuntimeOrigin}; use codec::Encode; - use snowbridge_beacon_primitives::CompactExecutionHeader; + use snowbridge_beacon_primitives::BeaconHeader; use snowbridge_pallet_inbound_queue::BenchmarkHelper; use sp_core::H256; use xcm::latest::{Assets, Location, SendError, SendResult, SendXcm, Xcm, XcmHash}; impl BenchmarkHelper for Runtime { - fn initialize_storage(block_hash: H256, header: CompactExecutionHeader) { - EthereumBeaconClient::store_execution_header(block_hash, header, 0, H256::default()) + fn initialize_storage(beacon_header: BeaconHeader, block_roots_root: H256) { + EthereumBeaconClient::store_finalized_header(beacon_header, block_roots_root).unwrap(); } } @@ -642,14 +645,9 @@ parameter_types! { }; } -parameter_types! { - pub const MaxExecutionHeadersToKeep: u32 = prod_or_fast!(8192 * 2, 1000); -} - impl snowbridge_pallet_ethereum_client::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ForkVersions = ChainForkVersions; - type MaxExecutionHeadersToKeep = MaxExecutionHeadersToKeep; type WeightInfo = weights::snowbridge_pallet_ethereum_client::WeightInfo; } @@ -1025,8 +1023,8 @@ impl_runtime_apis! 
{ snowbridge_pallet_outbound_queue::api::prove_message::(leaf_index) } - fn calculate_fee(message: Message) -> Option { - snowbridge_pallet_outbound_queue::api::calculate_fee::(message) + fn calculate_fee(command: Command, parameters: Option>) -> Fee { + snowbridge_pallet_outbound_queue::api::calculate_fee::(command, parameters) } } @@ -1500,7 +1498,8 @@ mod tests { ( bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), - ) + ), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); // for BridgeHubRococo diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs index a732e1a57343..adfaa9ea2028 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,30 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 18_513_000 picoseconds. - Weight::from_parts(19_156_000, 0) + // Minimum execution time: 18_732_000 picoseconds. + Weight::from_parts(19_386_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 18_943_000 picoseconds. 
+ Weight::from_parts(19_455_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +112,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 88_096_000 picoseconds. - Weight::from_parts(89_732_000, 0) + // Minimum execution time: 88_917_000 picoseconds. + Weight::from_parts(91_611_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -126,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 88_239_000 picoseconds. - Weight::from_parts(89_729_000, 0) + // Minimum execution time: 88_587_000 picoseconds. + Weight::from_parts(90_303_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -142,14 +164,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_955_000 picoseconds. - Weight::from_parts(6_266_000, 0) + // Minimum execution time: 5_856_000 picoseconds. + Weight::from_parts(6_202_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +191,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_868_000 picoseconds. - Weight::from_parts(1_961_000, 0) + // Minimum execution time: 1_797_000 picoseconds. + Weight::from_parts(1_970_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +218,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 24_388_000 picoseconds. - Weight::from_parts(25_072_000, 0) + // Minimum execution time: 24_479_000 picoseconds. + Weight::from_parts(25_058_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +244,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_762_000 picoseconds. - Weight::from_parts(27_631_000, 0) + // Minimum execution time: 27_282_000 picoseconds. 
+ Weight::from_parts(27_924_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,8 +256,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_856_000 picoseconds. - Weight::from_parts(2_033_000, 0) + // Minimum execution time: 1_801_000 picoseconds. + Weight::from_parts(1_988_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -235,8 +267,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 17_718_000 picoseconds. - Weight::from_parts(18_208_000, 0) + // Minimum execution time: 16_509_000 picoseconds. + Weight::from_parts(16_939_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -247,8 +279,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 17_597_000 picoseconds. - Weight::from_parts(18_090_000, 0) + // Minimum execution time: 16_140_000 picoseconds. + Weight::from_parts(16_843_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -259,8 +291,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 19_533_000 picoseconds. - Weight::from_parts(20_164_000, 0) + // Minimum execution time: 18_160_000 picoseconds. + Weight::from_parts(18_948_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -282,8 +314,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 24_958_000 picoseconds. - Weight::from_parts(25_628_000, 0) + // Minimum execution time: 24_409_000 picoseconds. + Weight::from_parts(25_261_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -294,8 +326,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 12_209_000 picoseconds. - Weight::from_parts(12_612_000, 0) + // Minimum execution time: 10_848_000 picoseconds. + Weight::from_parts(11_241_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -305,8 +337,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 17_844_000 picoseconds. - Weight::from_parts(18_266_000, 0) + // Minimum execution time: 16_609_000 picoseconds. + Weight::from_parts(17_044_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -329,8 +361,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 34_131_000 picoseconds. - Weight::from_parts(34_766_000, 0) + // Minimum execution time: 32_500_000 picoseconds. 
+ Weight::from_parts(33_475_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -343,8 +375,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_525_000 picoseconds. - Weight::from_parts(3_724_000, 0) + // Minimum execution time: 3_484_000 picoseconds. + Weight::from_parts(3_673_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,8 +387,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_975_000 picoseconds. - Weight::from_parts(25_517_000, 0) + // Minimum execution time: 25_225_000 picoseconds. + Weight::from_parts(25_731_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,8 +399,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 33_761_000 picoseconds. - Weight::from_parts(34_674_000, 0) + // Minimum execution time: 33_961_000 picoseconds. + Weight::from_parts(34_818_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_ethereum_client.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_ethereum_client.rs index 0d5f29c6ff2f..c8017939b627 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_ethereum_client.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_ethereum_client.rs @@ -126,26 +126,4 @@ impl snowbridge_pallet_ethereum_client::WeightInfo for .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: EthereumBeaconClient LatestFinalizedBlockRoot (r:1 w:0) - /// Proof: EthereumBeaconClient LatestFinalizedBlockRoot (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient FinalizedBeaconState (r:1 w:0) - /// Proof: EthereumBeaconClient FinalizedBeaconState (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient LatestExecutionState (r:1 w:1) - /// Proof: EthereumBeaconClient LatestExecutionState (max_values: Some(1), max_size: Some(80), added: 575, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient ExecutionHeaderIndex (r:1 w:1) - /// Proof: EthereumBeaconClient ExecutionHeaderIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient ExecutionHeaderMapping (r:1 w:1) - /// Proof: EthereumBeaconClient ExecutionHeaderMapping (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient ExecutionHeaders (r:0 w:1) - /// Proof: EthereumBeaconClient ExecutionHeaders (max_values: None, max_size: Some(136), added: 2611, mode: MaxEncodedLen) - fn submit_execution_header() -> Weight { - // Proof Size summary in bytes: - // Measured: `386` - // Estimated: `3537` - // Minimum execution time: 108_761_000 picoseconds. 
-        Weight::from_parts(113_158_000, 0)
-            .saturating_add(Weight::from_parts(0, 3537))
-            .saturating_add(T::DbWeight::get().reads(5))
-            .saturating_add(T::DbWeight::get().writes(4))
-    }
 }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_inbound_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_inbound_queue.rs
index faf404f90cb3..153c1d363be1 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_inbound_queue.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_inbound_queue.rs
@@ -58,12 +58,12 @@ impl<T: frame_system::Config> snowbridge_pallet_inbound_queue::WeightInfo for WeightInfo<T> {
     /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
     fn submit() -> Weight {
         // Proof Size summary in bytes:
-        // Measured: `457`
-        // Estimated: `3601`
-        // Minimum execution time: 69_000_000 picoseconds.
-        Weight::from_parts(70_000_000, 0)
-            .saturating_add(Weight::from_parts(0, 3601))
-            .saturating_add(T::DbWeight::get().reads(4))
-            .saturating_add(T::DbWeight::get().writes(2))
+        // Measured: `800`
+        // Estimated: `7200`
+        // Minimum execution time: 200_000_000 picoseconds.
+        Weight::from_parts(200_000_000, 0)
+            .saturating_add(Weight::from_parts(0, 7200))
+            .saturating_add(T::DbWeight::get().reads(9))
+            .saturating_add(T::DbWeight::get().writes(6))
     }
 }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs
index ec20d6b5da08..544cf24a7dfa 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs
@@ -336,6 +336,9 @@ impl xcm_executor::Config for XcmConfig {
     type SafeCallFilter = SafeCallFilter;
     type Aliasers = Nothing;
     type TransactionalProcessor = FrameTransactionalProcessor;
+    type HrmpNewChannelOpenRequestHandler = ();
+    type HrmpChannelAcceptedHandler = ();
+    type HrmpChannelClosingHandler = ();
 }
 
 pub type PriceForParentDelivery =
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs
index 239bd946e759..5960ab7b5505 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs
@@ -187,15 +187,11 @@ fn construct_extrinsic(
             OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),
             OnBridgeHubRococoRefundRococoBulletinMessages::default(),
         ),
+        cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
     );
     let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap();
     let signature = payload.using_encoded(|e| sender.sign(e));
-    UncheckedExtrinsic::new_signed(
-        call,
-        account_id.into(),
-        Signature::Sr25519(signature.clone()),
-        extra,
-    )
+    UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), extra)
 }
 
 fn construct_and_apply_extrinsic(
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
index f11954cf165f..776c505fa640 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
@@ -64,15 +64,11 @@ fn construct_extrinsic(
             bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),
             bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(),
         ),
+        cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
     );
     let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap();
     let signature = payload.using_encoded(|e| sender.sign(e));
-    UncheckedExtrinsic::new_signed(
-        call,
-        account_id.into(),
-        Signature::Sr25519(signature.clone()),
-        extra,
-    )
+    UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), extra)
 }
 
 fn construct_and_apply_extrinsic(
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
index 01fc96ddd7f3..43f2e8a68103 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
@@ -16,7 +16,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder",
 codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
 hex-literal = { version = "0.4.1" }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
 
 # Substrate
@@ -70,31 +70,33 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f
 cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false }
 cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false }
 cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false }
+cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false }
+
 pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false }
 parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false }
 parachains-common = { path = "../../../common", default-features = false }
 testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] }
 
 # Bridges
-bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false }
-bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false }
-bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false }
-bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false }
+bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false }
+bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false }
+bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false }
+bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false }
 bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false }
 bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false }
 bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false }
 bp-polkadot-core = { path = "../../../../../bridges/primitives/polkadot-core", default-features = false }
 bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false }
 bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false }
-bp-rococo = { path = "../../../../../bridges/primitives/chain-rococo", default-features = false }
-bp-westend = { path = "../../../../../bridges/primitives/chain-westend", default-features = false }
+bp-rococo = { path = "../../../../../bridges/chains/chain-rococo", default-features = false }
+bp-westend = { path = "../../../../../bridges/chains/chain-westend", default-features = false }
 pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false }
 pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false }
 pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false }
 pallet-bridge-relayers = { path = "../../../../../bridges/modules/relayers", default-features = false }
 pallet-xcm-bridge-hub = { path = "../../../../../bridges/modules/xcm-bridge-hub", default-features = false }
 bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", default-features = false }
-bridge-hub-common = { path = "../../bridge-hubs/common", default-features = false }
+bridge-hub-common = { path = "../common", default-features = false }
 
 [dev-dependencies]
 static_assertions = "1.1"
@@ -127,6 +129,7 @@ std = [
     "cumulus-pallet-xcmp-queue/std",
     "cumulus-primitives-aura/std",
    "cumulus-primitives-core/std",
+    "cumulus-primitives-storage-weight-reclaim/std",
     "cumulus-primitives-utility/std",
     "frame-benchmarking/std",
     "frame-executive/std",
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
index 599f1bcdd4ae..49e50aa55cee 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
@@ -112,6 +112,7 @@ pub type SignedExtra = (
     pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
     BridgeRejectObsoleteHeadersAndMessages,
     (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages,),
+    cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
 );
 
 /// Unchecked extrinsic type as expected by this runtime.
@@ -179,7 +180,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
     spec_name: create_runtime_str!("bridge-hub-westend"),
     impl_name: create_runtime_str!("bridge-hub-westend"),
     authoring_version: 1,
-    spec_version: 1_008_000,
+    spec_version: 1_009_000,
     impl_version: 0,
     apis: RUNTIME_API_VERSIONS,
     transaction_version: 4,
@@ -351,6 +352,7 @@ impl pallet_message_queue::Config for Runtime {
     type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>;
     type MaxStale = sp_core::ConstU32<8>;
     type ServiceWeight = MessageQueueServiceWeight;
+    type IdleMaxServiceWeight = MessageQueueServiceWeight;
 }
 
 impl cumulus_pallet_aura_ext::Config for Runtime {}
@@ -1157,6 +1159,7 @@ mod tests {
             (
                 bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),
             ),
+            cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new()
         );
 
         {
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs
index a78ff2355efa..9cf4c61466a1 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-03-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
@@ -64,8 +64,30 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `38`
         // Estimated: `3503`
-        // Minimum execution time: 19_527_000 picoseconds.
-        Weight::from_parts(19_839_000, 0)
+        // Minimum execution time: 19_702_000 picoseconds.
+        Weight::from_parts(20_410_000, 0)
+            .saturating_add(Weight::from_parts(0, 3503))
+            .saturating_add(T::DbWeight::get().reads(6))
+            .saturating_add(T::DbWeight::get().writes(2))
+    }
+    /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+    /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+    /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+    /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+    /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+    /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+    /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+    /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+    /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+    /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+    /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+    /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+    fn send_blob() -> Weight {
+        // Proof Size summary in bytes:
+        // Measured: `38`
+        // Estimated: `3503`
+        // Minimum execution time: 19_525_000 picoseconds.
+        Weight::from_parts(20_071_000, 0)
             .saturating_add(Weight::from_parts(0, 3503))
             .saturating_add(T::DbWeight::get().reads(6))
             .saturating_add(T::DbWeight::get().writes(2))
@@ -90,8 +112,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `107`
         // Estimated: `3593`
-        // Minimum execution time: 90_938_000 picoseconds.
-        Weight::from_parts(92_822_000, 0)
+        // Minimum execution time: 91_793_000 picoseconds.
+        Weight::from_parts(93_761_000, 0)
             .saturating_add(Weight::from_parts(0, 3593))
             .saturating_add(T::DbWeight::get().reads(8))
             .saturating_add(T::DbWeight::get().writes(3))
@@ -126,8 +148,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `107`
         // Estimated: `3593`
-        // Minimum execution time: 90_133_000 picoseconds.
-        Weight::from_parts(92_308_000, 0)
+        // Minimum execution time: 91_819_000 picoseconds.
+        Weight::from_parts(93_198_000, 0)
             .saturating_add(Weight::from_parts(0, 3593))
             .saturating_add(T::DbWeight::get().reads(8))
             .saturating_add(T::DbWeight::get().writes(3))
@@ -142,14 +164,24 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         Weight::from_parts(18_446_744_073_709_551_000, 0)
             .saturating_add(Weight::from_parts(0, 0))
     }
+    /// Storage: `Benchmark::Override` (r:0 w:0)
+    /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
+    fn execute_blob() -> Weight {
+        // Proof Size summary in bytes:
+        // Measured: `0`
+        // Estimated: `0`
+        // Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+        Weight::from_parts(18_446_744_073_709_551_000, 0)
+            .saturating_add(Weight::from_parts(0, 0))
+    }
     /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1)
     /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
     fn force_xcm_version() -> Weight {
         // Proof Size summary in bytes:
         // Measured: `0`
         // Estimated: `0`
-        // Minimum execution time: 6_205_000 picoseconds.
-        Weight::from_parts(6_595_000, 0)
+        // Minimum execution time: 6_183_000 picoseconds.
+        Weight::from_parts(6_598_000, 0)
             .saturating_add(Weight::from_parts(0, 0))
             .saturating_add(T::DbWeight::get().writes(1))
     }
@@ -159,8 +191,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `0`
         // Estimated: `0`
-        // Minimum execution time: 1_927_000 picoseconds.
-        Weight::from_parts(2_062_000, 0)
+        // Minimum execution time: 1_987_000 picoseconds.
+        Weight::from_parts(2_076_000, 0)
             .saturating_add(Weight::from_parts(0, 0))
             .saturating_add(T::DbWeight::get().writes(1))
     }
@@ -186,8 +218,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `38`
         // Estimated: `3503`
-        // Minimum execution time: 25_078_000 picoseconds.
-        Weight::from_parts(25_782_000, 0)
+        // Minimum execution time: 25_375_000 picoseconds.
+        Weight::from_parts(26_165_000, 0)
             .saturating_add(Weight::from_parts(0, 3503))
             .saturating_add(T::DbWeight::get().reads(8))
             .saturating_add(T::DbWeight::get().writes(5))
@@ -212,8 +244,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `255`
         // Estimated: `3720`
-        // Minimum execution time: 28_188_000 picoseconds.
-        Weight::from_parts(28_826_000, 0)
+        // Minimum execution time: 28_167_000 picoseconds.
+        Weight::from_parts(28_792_000, 0)
             .saturating_add(Weight::from_parts(0, 3720))
             .saturating_add(T::DbWeight::get().reads(7))
             .saturating_add(T::DbWeight::get().writes(4))
@@ -224,8 +256,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `0`
         // Estimated: `0`
-        // Minimum execution time: 1_886_000 picoseconds.
-        Weight::from_parts(1_991_000, 0)
+        // Minimum execution time: 2_039_000 picoseconds.
+        Weight::from_parts(2_211_000, 0)
             .saturating_add(Weight::from_parts(0, 0))
             .saturating_add(T::DbWeight::get().writes(1))
     }
@@ -235,8 +267,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `89`
         // Estimated: `13454`
-        // Minimum execution time: 17_443_000 picoseconds.
-        Weight::from_parts(17_964_000, 0)
+        // Minimum execution time: 17_127_000 picoseconds.
+        Weight::from_parts(17_519_000, 0)
             .saturating_add(Weight::from_parts(0, 13454))
             .saturating_add(T::DbWeight::get().reads(5))
             .saturating_add(T::DbWeight::get().writes(2))
@@ -247,8 +279,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `93`
         // Estimated: `13458`
-        // Minimum execution time: 17_357_000 picoseconds.
-        Weight::from_parts(18_006_000, 0)
+        // Minimum execution time: 16_701_000 picoseconds.
+        Weight::from_parts(17_250_000, 0)
             .saturating_add(Weight::from_parts(0, 13458))
             .saturating_add(T::DbWeight::get().reads(5))
             .saturating_add(T::DbWeight::get().writes(2))
@@ -259,8 +291,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `106`
         // Estimated: `15946`
-        // Minimum execution time: 18_838_000 picoseconds.
-        Weight::from_parts(19_688_000, 0)
+        // Minimum execution time: 18_795_000 picoseconds.
+        Weight::from_parts(19_302_000, 0)
             .saturating_add(Weight::from_parts(0, 15946))
             .saturating_add(T::DbWeight::get().reads(6))
     }
@@ -282,8 +314,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `106`
         // Estimated: `6046`
-        // Minimum execution time: 25_517_000 picoseconds.
-        Weight::from_parts(26_131_000, 0)
+        // Minimum execution time: 25_007_000 picoseconds.
+        Weight::from_parts(25_786_000, 0)
             .saturating_add(Weight::from_parts(0, 6046))
             .saturating_add(T::DbWeight::get().reads(8))
             .saturating_add(T::DbWeight::get().writes(3))
@@ -294,8 +326,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `136`
         // Estimated: `11026`
-        // Minimum execution time: 11_587_000 picoseconds.
-        Weight::from_parts(11_963_000, 0)
+        // Minimum execution time: 11_534_000 picoseconds.
+        Weight::from_parts(11_798_000, 0)
             .saturating_add(Weight::from_parts(0, 11026))
             .saturating_add(T::DbWeight::get().reads(4))
     }
@@ -305,8 +337,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `100`
         // Estimated: `13465`
-        // Minimum execution time: 17_490_000 picoseconds.
-        Weight::from_parts(18_160_000, 0)
+        // Minimum execution time: 17_357_000 picoseconds.
+        Weight::from_parts(17_629_000, 0)
             .saturating_add(Weight::from_parts(0, 13465))
             .saturating_add(T::DbWeight::get().reads(5))
             .saturating_add(T::DbWeight::get().writes(2))
@@ -329,8 +361,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `106`
         // Estimated: `13471`
-        // Minimum execution time: 34_088_000 picoseconds.
-        Weight::from_parts(34_598_000, 0)
+        // Minimum execution time: 33_487_000 picoseconds.
+        Weight::from_parts(34_033_000, 0)
             .saturating_add(Weight::from_parts(0, 13471))
             .saturating_add(T::DbWeight::get().reads(11))
             .saturating_add(T::DbWeight::get().writes(4))
@@ -343,8 +375,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `32`
         // Estimated: `1517`
-        // Minimum execution time: 3_566_000 picoseconds.
-        Weight::from_parts(3_754_000, 0)
+        // Minimum execution time: 3_688_000 picoseconds.
+        Weight::from_parts(3_854_000, 0)
             .saturating_add(Weight::from_parts(0, 1517))
             .saturating_add(T::DbWeight::get().reads(1))
             .saturating_add(T::DbWeight::get().writes(2))
@@ -355,8 +387,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `7669`
         // Estimated: `11134`
-        // Minimum execution time: 25_078_000 picoseconds.
-        Weight::from_parts(25_477_000, 0)
+        // Minimum execution time: 26_336_000 picoseconds.
+        Weight::from_parts(26_873_000, 0)
             .saturating_add(Weight::from_parts(0, 11134))
             .saturating_add(T::DbWeight::get().reads(1))
             .saturating_add(T::DbWeight::get().writes(1))
@@ -367,8 +399,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `90`
         // Estimated: `3555`
-        // Minimum execution time: 34_661_000 picoseconds.
-        Weight::from_parts(35_411_000, 0)
+        // Minimum execution time: 34_633_000 picoseconds.
+        Weight::from_parts(35_171_000, 0)
             .saturating_add(Weight::from_parts(0, 3555))
             .saturating_add(T::DbWeight::get().reads(1))
             .saturating_add(T::DbWeight::get().writes(1))
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs
index e18df6feda82..840d0c9af0e5 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs
@@ -264,6 +264,9 @@ impl xcm_executor::Config for XcmConfig {
     type SafeCallFilter = SafeCallFilter;
     type Aliasers = Nothing;
     type TransactionalProcessor = FrameTransactionalProcessor;
+    type HrmpNewChannelOpenRequestHandler = ();
+    type HrmpChannelAcceptedHandler = ();
+    type HrmpChannelClosingHandler = ();
 }
 
 pub type PriceForParentDelivery =
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs
index 149a3bbeb75d..988b10e1e2d8 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs
@@ -78,15 +78,11 @@ fn construct_extrinsic(
         pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(0),
         BridgeRejectObsoleteHeadersAndMessages::default(),
         (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),),
+        cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
     );
     let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap();
     let signature = payload.using_encoded(|e| sender.sign(e));
-    UncheckedExtrinsic::new_signed(
-        call,
-        account_id.into(),
-        Signature::Sr25519(signature.clone()),
-        extra,
-    )
+    UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), extra)
 }
 
 fn construct_and_apply_extrinsic(
diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml
index 870b1a944f53..29ac88cf1e5a 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml
@@ -8,7 +8,7 @@ license = "Apache-2.0"
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../../../../../substrate/frame/support", default-features = false }
 sp-core = { path = "../../../../../substrate/primitives/core", default-features = false }
 sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false }
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
index 1f963b61f0da..4b61ac1bcfa0 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] }
 hex-literal = { version = "0.4.1" }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 # Substrate
 frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true }
@@ -76,6 +76,8 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f
 cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false }
 cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false }
 cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false }
+cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false }
+
 pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false }
 pallet-collective-content = { path = "../../../pallets/collective-content", default-features = false }
 parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false }
@@ -171,6 +173,7 @@ std = [
     "cumulus-pallet-xcmp-queue/std",
     "cumulus-primitives-aura/std",
     "cumulus-primitives-core/std",
+    "cumulus-primitives-storage-weight-reclaim/std",
     "cumulus-primitives-utility/std",
     "frame-benchmarking?/std",
     "frame-executive/std",
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
index c13faba7b7b3..09f0fa56d025 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
@@ -122,7 +122,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
     spec_name: create_runtime_str!("collectives-westend"),
     impl_name: create_runtime_str!("collectives-westend"),
     authoring_version: 1,
-    spec_version: 1_008_000,
+    spec_version: 1_009_000,
     impl_version: 0,
     apis: RUNTIME_API_VERSIONS,
     transaction_version: 5,
@@ -428,6 +428,7 @@ impl pallet_message_queue::Config for Runtime {
     type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>;
     type MaxStale = sp_core::ConstU32<8>;
     type ServiceWeight = MessageQueueServiceWeight;
+    type IdleMaxServiceWeight = MessageQueueServiceWeight;
 }
 
 impl cumulus_pallet_aura_ext::Config for Runtime {}
@@ -717,6 +718,7 @@ pub type SignedExtra = (
     frame_system::CheckEra<Runtime>,
     frame_system::CheckNonce<Runtime>,
     frame_system::CheckWeight<Runtime>,
+    cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
 );
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs
index 5d427d850046..0edd5dfff2b8 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
@@ -64,8 +64,30 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `145`
         // Estimated: `3610`
-        // Minimum execution time: 21_813_000 picoseconds.
-        Weight::from_parts(22_332_000, 0)
+        // Minimum execution time: 21_911_000 picoseconds.
+        Weight::from_parts(22_431_000, 0)
+            .saturating_add(Weight::from_parts(0, 3610))
+            .saturating_add(T::DbWeight::get().reads(6))
+            .saturating_add(T::DbWeight::get().writes(2))
+    }
+    /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+    /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+    /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+    /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+    /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+    /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+    /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+    /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+    /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+    /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+    /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+    /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+    fn send_blob() -> Weight {
+        // Proof Size summary in bytes:
+        // Measured: `145`
+        // Estimated: `3610`
+        // Minimum execution time: 22_143_000 picoseconds.
+        Weight::from_parts(22_843_000, 0)
             .saturating_add(Weight::from_parts(0, 3610))
             .saturating_add(T::DbWeight::get().reads(6))
             .saturating_add(T::DbWeight::get().writes(2))
@@ -90,8 +112,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `214`
         // Estimated: `3679`
-        // Minimum execution time: 93_243_000 picoseconds.
-        Weight::from_parts(95_650_000, 0)
+        // Minimum execution time: 96_273_000 picoseconds.
+        Weight::from_parts(98_351_000, 0)
             .saturating_add(Weight::from_parts(0, 3679))
             .saturating_add(T::DbWeight::get().reads(8))
             .saturating_add(T::DbWeight::get().writes(3))
@@ -126,8 +148,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `214`
         // Estimated: `3679`
-        // Minimum execution time: 96_199_000 picoseconds.
-        Weight::from_parts(98_620_000, 0)
+        // Minimum execution time: 95_571_000 picoseconds.
+        Weight::from_parts(96_251_000, 0)
             .saturating_add(Weight::from_parts(0, 3679))
             .saturating_add(T::DbWeight::get().reads(8))
             .saturating_add(T::DbWeight::get().writes(3))
@@ -142,14 +164,24 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         Weight::from_parts(18_446_744_073_709_551_000, 0)
             .saturating_add(Weight::from_parts(0, 0))
     }
+    /// Storage: `Benchmark::Override` (r:0 w:0)
+    /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
+    fn execute_blob() -> Weight {
+        // Proof Size summary in bytes:
+        // Measured: `0`
+        // Estimated: `0`
+        // Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+        Weight::from_parts(18_446_744_073_709_551_000, 0)
+            .saturating_add(Weight::from_parts(0, 0))
+    }
     /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1)
     /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
     fn force_xcm_version() -> Weight {
         // Proof Size summary in bytes:
         // Measured: `0`
         // Estimated: `0`
-        // Minimum execution time: 6_442_000 picoseconds.
-        Weight::from_parts(6_682_000, 0)
+        // Minimum execution time: 6_227_000 picoseconds.
+        Weight::from_parts(6_419_000, 0)
             .saturating_add(Weight::from_parts(0, 0))
             .saturating_add(T::DbWeight::get().writes(1))
     }
@@ -159,8 +191,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `0`
         // Estimated: `0`
-        // Minimum execution time: 1_833_000 picoseconds.
-        Weight::from_parts(1_973_000, 0)
+        // Minimum execution time: 1_851_000 picoseconds.
+        Weight::from_parts(1_940_000, 0)
             .saturating_add(Weight::from_parts(0, 0))
             .saturating_add(T::DbWeight::get().writes(1))
     }
@@ -186,8 +218,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `145`
         // Estimated: `3610`
-        // Minimum execution time: 27_318_000 picoseconds.
-        Weight::from_parts(28_224_000, 0)
+        // Minimum execution time: 27_449_000 picoseconds.
+        Weight::from_parts(28_513_000, 0)
             .saturating_add(Weight::from_parts(0, 3610))
             .saturating_add(T::DbWeight::get().reads(8))
             .saturating_add(T::DbWeight::get().writes(5))
@@ -212,8 +244,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `363`
         // Estimated: `3828`
-        // Minimum execution time: 29_070_000 picoseconds.
-        Weight::from_parts(30_205_000, 0)
+        // Minimum execution time: 29_477_000 picoseconds.
+        Weight::from_parts(30_251_000, 0)
             .saturating_add(Weight::from_parts(0, 3828))
             .saturating_add(T::DbWeight::get().reads(7))
             .saturating_add(T::DbWeight::get().writes(4))
@@ -224,8 +256,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `0`
         // Estimated: `0`
-        // Minimum execution time: 1_904_000 picoseconds.
-        Weight::from_parts(2_033_000, 0)
+        // Minimum execution time: 1_894_000 picoseconds.
+        Weight::from_parts(2_009_000, 0)
             .saturating_add(Weight::from_parts(0, 0))
             .saturating_add(T::DbWeight::get().writes(1))
     }
@@ -235,8 +267,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `159`
         // Estimated: `13524`
-        // Minimum execution time: 18_348_000 picoseconds.
-        Weight::from_parts(18_853_000, 0)
+        // Minimum execution time: 17_991_000 picoseconds.
+        Weight::from_parts(18_651_000, 0)
             .saturating_add(Weight::from_parts(0, 13524))
             .saturating_add(T::DbWeight::get().reads(5))
             .saturating_add(T::DbWeight::get().writes(2))
@@ -247,8 +279,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `163`
         // Estimated: `13528`
-        // Minimum execution time: 17_964_000 picoseconds.
-        Weight::from_parts(18_548_000, 0)
+        // Minimum execution time: 18_321_000 picoseconds.
+        Weight::from_parts(18_701_000, 0)
             .saturating_add(Weight::from_parts(0, 13528))
             .saturating_add(T::DbWeight::get().reads(5))
             .saturating_add(T::DbWeight::get().writes(2))
@@ -259,8 +291,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `173`
         // Estimated: `16013`
-        // Minimum execution time: 19_708_000 picoseconds.
-        Weight::from_parts(20_157_000, 0)
+        // Minimum execution time: 19_762_000 picoseconds.
+        Weight::from_parts(20_529_000, 0)
             .saturating_add(Weight::from_parts(0, 16013))
             .saturating_add(T::DbWeight::get().reads(6))
     }
@@ -282,8 +314,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `212`
         // Estimated: `6152`
-        // Minimum execution time: 26_632_000 picoseconds.
-        Weight::from_parts(27_314_000, 0)
+        // Minimum execution time: 26_927_000 picoseconds.
+        Weight::from_parts(27_629_000, 0)
             .saturating_add(Weight::from_parts(0, 6152))
             .saturating_add(T::DbWeight::get().reads(8))
             .saturating_add(T::DbWeight::get().writes(3))
@@ -294,8 +326,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `206`
         // Estimated: `11096`
-        // Minimum execution time: 11_929_000 picoseconds.
-        Weight::from_parts(12_304_000, 0)
+        // Minimum execution time: 11_957_000 picoseconds.
+        Weight::from_parts(12_119_000, 0)
             .saturating_add(Weight::from_parts(0, 11096))
             .saturating_add(T::DbWeight::get().reads(4))
     }
@@ -305,8 +337,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `170`
         // Estimated: `13535`
-        // Minimum execution time: 18_599_000 picoseconds.
-        Weight::from_parts(19_195_000, 0)
+        // Minimum execution time: 17_942_000 picoseconds.
+        Weight::from_parts(18_878_000, 0)
             .saturating_add(Weight::from_parts(0, 13535))
             .saturating_add(T::DbWeight::get().reads(5))
             .saturating_add(T::DbWeight::get().writes(2))
@@ -329,8 +361,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `212`
         // Estimated: `13577`
-        // Minimum execution time: 35_524_000 picoseconds.
-        Weight::from_parts(36_272_000, 0)
+        // Minimum execution time: 35_640_000 picoseconds.
+        Weight::from_parts(36_340_000, 0)
             .saturating_add(Weight::from_parts(0, 13577))
             .saturating_add(T::DbWeight::get().reads(11))
             .saturating_add(T::DbWeight::get().writes(4))
@@ -344,7 +376,7 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Measured: `103`
         // Estimated: `1588`
         // Minimum execution time: 4_044_000 picoseconds.
-        Weight::from_parts(4_238_000, 0)
+        Weight::from_parts(4_229_000, 0)
             .saturating_add(Weight::from_parts(0, 1588))
             .saturating_add(T::DbWeight::get().reads(1))
             .saturating_add(T::DbWeight::get().writes(2))
@@ -355,8 +387,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `7740`
         // Estimated: `11205`
-        // Minimum execution time: 25_741_000 picoseconds.
-        Weight::from_parts(26_301_000, 0)
+        // Minimum execution time: 26_262_000 picoseconds.
+        Weight::from_parts(26_842_000, 0)
             .saturating_add(Weight::from_parts(0, 11205))
             .saturating_add(T::DbWeight::get().reads(1))
             .saturating_add(T::DbWeight::get().writes(1))
@@ -367,8 +399,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `160`
         // Estimated: `3625`
-        // Minimum execution time: 35_925_000 picoseconds.
-        Weight::from_parts(36_978_000, 0)
+        // Minimum execution time: 36_775_000 picoseconds.
+        Weight::from_parts(37_265_000, 0)
             .saturating_add(Weight::from_parts(0, 3625))
             .saturating_add(T::DbWeight::get().reads(1))
             .saturating_add(T::DbWeight::get().writes(1))
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs
index 87f637d5b59a..b83106a58284 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs
@@ -288,6 +288,9 @@ impl xcm_executor::Config for XcmConfig {
     type SafeCallFilter = SafeCallFilter;
     type Aliasers = Nothing;
     type TransactionalProcessor = FrameTransactionalProcessor;
+    type HrmpNewChannelOpenRequestHandler = ();
+    type HrmpChannelAcceptedHandler = ();
+    type HrmpChannelClosingHandler = ();
 }
 
 /// Converts a local signed origin into an XCM location.
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
index 8b457148fcea..80b53afefe08 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
@@ -19,7 +19,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder",
 codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
 hex-literal = { version = "0.4.1", optional = true }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 # Substrate
 sp-api = { path = "../../../../../substrate/primitives/api", default-features = false }
@@ -73,6 +73,8 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f
 cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false }
 cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false }
 cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false }
+cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false }
+
 pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false }
 parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false }
 parachains-common = { path = "../../../common", default-features = false }
@@ -89,6 +91,7 @@ std = [
     "cumulus-pallet-xcmp-queue/std",
     "cumulus-primitives-aura/std",
     "cumulus-primitives-core/std",
+    "cumulus-primitives-storage-weight-reclaim/std",
     "cumulus-primitives-utility/std",
     "frame-benchmarking?/std",
     "frame-executive/std",
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
index 92aa9e4d5323..538d7e697c8e 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
@@ -95,6 +95,7 @@ pub type SignedExtra = (
     frame_system::CheckNonce<Runtime>,
     frame_system::CheckWeight<Runtime>,
     pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+    cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
 );
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -138,7 +139,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
     spec_name: create_runtime_str!("contracts-rococo"),
     impl_name: create_runtime_str!("contracts-rococo"),
     authoring_version: 1,
-    spec_version: 1_008_000,
+    spec_version: 1_009_000,
     impl_version: 0,
     apis: RUNTIME_API_VERSIONS,
     transaction_version: 6,
@@ -323,6 +324,7 @@ impl pallet_message_queue::Config for Runtime {
     type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>;
     type MaxStale = sp_core::ConstU32<8>;
     type ServiceWeight = MessageQueueServiceWeight;
+    type IdleMaxServiceWeight = MessageQueueServiceWeight;
 }
 
 impl cumulus_pallet_aura_ext::Config for Runtime {}
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs
index e8f3209eb67f..ac6fe634662f 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs
@@ -197,6 +197,9 @@ impl xcm_executor::Config for XcmConfig {
     type SafeCallFilter = Everything;
     type Aliasers = Nothing;
     type TransactionalProcessor = FrameTransactionalProcessor;
+    type HrmpNewChannelOpenRequestHandler = ();
+    type HrmpChannelAcceptedHandler = ();
+    type HrmpChannelClosingHandler = ();
 }
 
 /// Converts a local signed origin into an XCM location.
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
index 65d0e940cc13..ae059728f392 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
@@ -71,6 +71,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f
 cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false }
 cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false }
 cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false }
+cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false }
 pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false }
 parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false }
 parachains-common = { path = "../../../common", default-features = false }
@@ -87,6 +88,7 @@ std = [
     "cumulus-pallet-xcmp-queue/std",
     "cumulus-primitives-aura/std",
     "cumulus-primitives-core/std",
+    "cumulus-primitives-storage-weight-reclaim/std",
     "cumulus-primitives-utility/std",
     "frame-benchmarking?/std",
     "frame-executive/std",
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
index b835c2a46e69..8f700eb4e26f 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
@@ -103,6 +103,7 @@ pub type SignedExtra = (
     frame_system::CheckNonce<Runtime>,
     frame_system::CheckWeight<Runtime>,
     pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+    cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
 );
 
 /// Unchecked extrinsic type as expected by this runtime.
@@ -137,7 +138,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
     spec_name: create_runtime_str!("coretime-rococo"),
     impl_name: create_runtime_str!("coretime-rococo"),
     authoring_version: 1,
-    spec_version: 1_008_000,
+    spec_version: 1_009_000,
     impl_version: 0,
     apis: RUNTIME_API_VERSIONS,
     transaction_version: 0,
@@ -305,6 +306,7 @@ impl pallet_message_queue::Config for Runtime {
     type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>;
     type MaxStale = sp_core::ConstU32<8>;
     type ServiceWeight = MessageQueueServiceWeight;
+    type IdleMaxServiceWeight = MessageQueueServiceWeight;
 }
 
 impl parachain_info::Config for Runtime {}
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs
index 2d30ddc612cb..89b1c4c86632 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs
@@ -16,26 +16,24 @@
 //! Autogenerated weights for `pallet_broker`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2024-01-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --chain=coretime-rococo-dev
-// --wasm-execution=compiled
-// --pallet=pallet_broker
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
 // --steps=50
 // --repeat=20
-// --json
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_broker
+// --chain=coretime-rococo-dev
 // --header=./cumulus/file_header.txt
 // --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/
@@ -56,8 +54,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `0`
         // Estimated: `0`
-        // Minimum execution time: 2_462_000 picoseconds.
-        Weight::from_parts(2_552_000, 0)
+        // Minimum execution time: 1_918_000 picoseconds.
+        Weight::from_parts(2_092_000, 0)
             .saturating_add(Weight::from_parts(0, 0))
             .saturating_add(T::DbWeight::get().writes(1))
     }
@@ -67,8 +65,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `10888`
         // Estimated: `13506`
-        // Minimum execution time: 25_494_000 picoseconds.
-        Weight::from_parts(26_063_000, 0)
+        // Minimum execution time: 21_943_000 picoseconds.
+        Weight::from_parts(22_570_000, 0)
             .saturating_add(Weight::from_parts(0, 13506))
             .saturating_add(T::DbWeight::get().reads(1))
             .saturating_add(T::DbWeight::get().writes(1))
@@ -79,8 +77,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `12090`
         // Estimated: `13506`
-        // Minimum execution time: 22_299_000 picoseconds.
-        Weight::from_parts(22_911_000, 0)
+        // Minimum execution time: 20_923_000 picoseconds.
+        Weight::from_parts(21_354_000, 0)
             .saturating_add(Weight::from_parts(0, 13506))
             .saturating_add(T::DbWeight::get().reads(1))
             .saturating_add(T::DbWeight::get().writes(1))
@@ -95,8 +93,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `466`
         // Estimated: `1951`
-        // Minimum execution time: 11_590_000 picoseconds.
-        Weight::from_parts(12_007_000, 0)
+        // Minimum execution time: 10_687_000 picoseconds.
+        Weight::from_parts(11_409_000, 0)
             .saturating_add(Weight::from_parts(0, 1951))
             .saturating_add(T::DbWeight::get().reads(3))
             .saturating_add(T::DbWeight::get().writes(1))
@@ -124,11 +122,11 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `12567`
         // Estimated: `14052`
-        // Minimum execution time: 120_928_000 picoseconds.
-        Weight::from_parts(124_947_252, 0)
+        // Minimum execution time: 111_288_000 picoseconds.
+        Weight::from_parts(117_804_282, 0)
             .saturating_add(Weight::from_parts(0, 14052))
-            // Standard Error: 435
-            .saturating_add(Weight::from_parts(1_246, 0).saturating_mul(n.into()))
+            // Standard Error: 391
+            .saturating_add(Weight::from_parts(1_243, 0).saturating_mul(n.into()))
             .saturating_add(T::DbWeight::get().reads(8))
             .saturating_add(T::DbWeight::get().writes(66))
     }
@@ -144,8 +142,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `316`
         // Estimated: `3593`
-        // Minimum execution time: 32_826_000 picoseconds.
-        Weight::from_parts(33_889_000, 0)
+        // Minimum execution time: 33_006_000 picoseconds.
+        Weight::from_parts(34_256_000, 0)
             .saturating_add(Weight::from_parts(0, 3593))
             .saturating_add(T::DbWeight::get().reads(3))
             .saturating_add(T::DbWeight::get().writes(2))
@@ -166,8 +164,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `434`
         // Estimated: `4698`
-        // Minimum execution time: 57_362_000 picoseconds.
-        Weight::from_parts(58_994_000, 0)
+        // Minimum execution time: 61_473_000 picoseconds.
+        Weight::from_parts(66_476_000, 0)
             .saturating_add(Weight::from_parts(0, 4698))
             .saturating_add(T::DbWeight::get().reads(5))
             .saturating_add(T::DbWeight::get().writes(4))
@@ -178,8 +176,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `357`
         // Estimated: `3550`
-        // Minimum execution time: 13_982_000 picoseconds.
-        Weight::from_parts(14_447_000, 0)
+        // Minimum execution time: 13_771_000 picoseconds.
+        Weight::from_parts(14_374_000, 0)
             .saturating_add(Weight::from_parts(0, 3550))
             .saturating_add(T::DbWeight::get().reads(1))
             .saturating_add(T::DbWeight::get().writes(1))
@@ -190,8 +188,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `357`
         // Estimated: `3550`
-        // Minimum execution time: 15_070_000 picoseconds.
-        Weight::from_parts(15_735_000, 0)
+        // Minimum execution time: 15_162_000 picoseconds.
+        Weight::from_parts(15_742_000, 0)
             .saturating_add(Weight::from_parts(0, 3550))
             .saturating_add(T::DbWeight::get().reads(1))
             .saturating_add(T::DbWeight::get().writes(2))
@@ -202,8 +200,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `357`
         // Estimated: `3550`
-        // Minimum execution time: 16_527_000 picoseconds.
-        Weight::from_parts(16_894_000, 0)
+        // Minimum execution time: 16_196_000 picoseconds.
+        Weight::from_parts(16_796_000, 0)
             .saturating_add(Weight::from_parts(0, 3550))
             .saturating_add(T::DbWeight::get().reads(1))
             .saturating_add(T::DbWeight::get().writes(3))
@@ -220,8 +218,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `936`
         // Estimated: `4681`
-        // Minimum execution time: 25_493_000 picoseconds.
-        Weight::from_parts(26_091_000, 0)
+        // Minimum execution time: 25_653_000 picoseconds.
+        Weight::from_parts(27_006_000, 0)
             .saturating_add(Weight::from_parts(0, 4681))
             .saturating_add(T::DbWeight::get().reads(4))
             .saturating_add(T::DbWeight::get().writes(2))
@@ -240,8 +238,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `1002`
         // Estimated: `5996`
-        // Minimum execution time: 31_498_000 picoseconds.
-        Weight::from_parts(32_560_000, 0)
+        // Minimum execution time: 31_114_000 picoseconds.
+        Weight::from_parts(32_235_000, 0)
             .saturating_add(Weight::from_parts(0, 5996))
             .saturating_add(T::DbWeight::get().reads(5))
             .saturating_add(T::DbWeight::get().writes(5))
@@ -257,11 +255,11 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `652`
         // Estimated: `6196 + m * (2520 ±0)`
-        // Minimum execution time: 57_183_000 picoseconds.
-        Weight::from_parts(58_024_898, 0)
+        // Minimum execution time: 57_280_000 picoseconds.
+        Weight::from_parts(58_127_480, 0)
             .saturating_add(Weight::from_parts(0, 6196))
-            // Standard Error: 35_831
-            .saturating_add(Weight::from_parts(1_384_446, 0).saturating_mul(m.into()))
+            // Standard Error: 41_670
+            .saturating_add(Weight::from_parts(1_203_066, 0).saturating_mul(m.into()))
             .saturating_add(T::DbWeight::get().reads(3))
             .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into())))
             .saturating_add(T::DbWeight::get().writes(5))
@@ -283,8 +281,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `215`
         // Estimated: `3680`
-        // Minimum execution time: 59_762_000 picoseconds.
-        Weight::from_parts(61_114_000, 0)
+        // Minimum execution time: 59_968_000 picoseconds.
+        Weight::from_parts(62_315_000, 0)
             .saturating_add(Weight::from_parts(0, 3680))
             .saturating_add(T::DbWeight::get().reads(6))
             .saturating_add(T::DbWeight::get().writes(3))
@@ -297,8 +295,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `465`
         // Estimated: `3550`
-        // Minimum execution time: 41_473_000 picoseconds.
-        Weight::from_parts(44_155_000, 0)
+        // Minimum execution time: 50_887_000 picoseconds.
+        Weight::from_parts(57_366_000, 0)
             .saturating_add(Weight::from_parts(0, 3550))
             .saturating_add(T::DbWeight::get().reads(2))
             .saturating_add(T::DbWeight::get().writes(1))
@@ -313,8 +311,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `463`
         // Estimated: `3533`
-        // Minimum execution time: 56_672_000 picoseconds.
-        Weight::from_parts(58_086_000, 0)
+        // Minimum execution time: 84_472_000 picoseconds.
+        Weight::from_parts(96_536_000, 0)
             .saturating_add(Weight::from_parts(0, 3533))
             .saturating_add(T::DbWeight::get().reads(3))
             .saturating_add(T::DbWeight::get().writes(1))
@@ -331,8 +329,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `857`
         // Estimated: `3593`
-        // Minimum execution time: 64_460_000 picoseconds.
-        Weight::from_parts(65_894_000, 0)
+        // Minimum execution time: 96_371_000 picoseconds.
+        Weight::from_parts(104_659_000, 0)
             .saturating_add(Weight::from_parts(0, 3593))
             .saturating_add(T::DbWeight::get().reads(4))
             .saturating_add(T::DbWeight::get().writes(1))
@@ -345,8 +343,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `957`
         // Estimated: `4698`
-        // Minimum execution time: 37_447_000 picoseconds.
-        Weight::from_parts(42_318_000, 0)
+        // Minimum execution time: 51_741_000 picoseconds.
+        Weight::from_parts(54_461_000, 0)
             .saturating_add(Weight::from_parts(0, 4698))
             .saturating_add(T::DbWeight::get().reads(2))
             .saturating_add(T::DbWeight::get().writes(1))
@@ -366,8 +364,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `74`
         // Estimated: `3539`
-        // Minimum execution time: 21_219_000 picoseconds.
-        Weight::from_parts(22_084_648, 0)
+        // Minimum execution time: 19_901_000 picoseconds.
+        Weight::from_parts(21_028_116, 0)
             .saturating_add(Weight::from_parts(0, 3539))
             .saturating_add(T::DbWeight::get().reads(5))
             .saturating_add(T::DbWeight::get().writes(2))
@@ -379,11 +377,11 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `266`
         // Estimated: `1487`
-        // Minimum execution time: 5_792_000 picoseconds.
-        Weight::from_parts(6_358_588, 0)
+        // Minimum execution time: 5_987_000 picoseconds.
+        Weight::from_parts(6_412_478, 0)
             .saturating_add(Weight::from_parts(0, 1487))
-            // Standard Error: 20
-            .saturating_add(Weight::from_parts(26, 0).saturating_mul(n.into()))
+            // Standard Error: 16
+            .saturating_add(Weight::from_parts(47, 0).saturating_mul(n.into()))
             .saturating_add(T::DbWeight::get().reads(1))
             .saturating_add(T::DbWeight::get().writes(1))
     }
@@ -397,8 +395,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `447`
         // Estimated: `6196`
-        // Minimum execution time: 38_690_000 picoseconds.
-        Weight::from_parts(39_706_000, 0)
+        // Minimum execution time: 38_623_000 picoseconds.
+        Weight::from_parts(39_773_000, 0)
             .saturating_add(Weight::from_parts(0, 6196))
             .saturating_add(T::DbWeight::get().reads(4))
             .saturating_add(T::DbWeight::get().writes(3))
@@ -414,15 +412,13 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
     /// Storage: `Broker::Workplan` (r:0 w:60)
     /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`)
     /// The range of component `n` is `[0, 1000]`.
-    fn rotate_sale(n: u32, ) -> Weight {
+    fn rotate_sale(_n: u32, ) -> Weight {
         // Proof Size summary in bytes:
         // Measured: `12514`
         // Estimated: `13506`
-        // Minimum execution time: 93_531_000 picoseconds.
-        Weight::from_parts(95_836_318, 0)
+        // Minimum execution time: 97_074_000 picoseconds.
+        Weight::from_parts(101_247_740, 0)
             .saturating_add(Weight::from_parts(0, 13506))
-            // Standard Error: 113
-            .saturating_add(Weight::from_parts(329, 0).saturating_mul(n.into()))
             .saturating_add(T::DbWeight::get().reads(5))
             .saturating_add(T::DbWeight::get().writes(65))
     }
@@ -434,8 +430,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
         // Proof Size summary in bytes:
         // Measured: `42`
         // Estimated: `3493`
-        // Minimum execution time: 6_506_000 picoseconds.
-        Weight::from_parts(6_783_000, 0)
+        // Minimum execution time: 6_317_000 picoseconds.
+ Weight::from_parts(6_521_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -458,8 +454,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1321` // Estimated: `4786` - // Minimum execution time: 31_927_000 picoseconds. - Weight::from_parts(32_748_000, 0) + // Minimum execution time: 32_575_000 picoseconds. + Weight::from_parts(33_299_000, 0) .saturating_add(Weight::from_parts(0, 4786)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -478,8 +474,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 15_682_000 picoseconds. - Weight::from_parts(16_012_000, 0) + // Minimum execution time: 15_256_000 picoseconds. + Weight::from_parts(15_927_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -490,8 +486,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_147_000 picoseconds. - Weight::from_parts(2_281_000, 0) + // Minimum execution time: 1_783_000 picoseconds. + Weight::from_parts(1_904_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -509,10 +505,22 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `398` // Estimated: `3863` - // Minimum execution time: 12_015_000 picoseconds. - Weight::from_parts(12_619_000, 0) + // Minimum execution time: 12_307_000 picoseconds. + Weight::from_parts(12_967_000, 0) .saturating_add(Weight::from_parts(0, 3863)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(401), added: 896, mode: `MaxEncodedLen`) + fn swap_leases() -> Weight { + // Proof Size summary in bytes: + // Measured: `470` + // Estimated: `1886` + // Minimum execution time: 6_597_000 picoseconds. + Weight::from_parts(6_969_000, 0) + .saturating_add(Weight::from_parts(0, 1886)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs index c5d315467c1e..df0044089c8f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,8 +62,28 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 35_051_000 picoseconds. - Weight::from_parts(35_200_000, 0) + // Minimum execution time: 18_767_000 picoseconds. + Weight::from_parts(19_420_000, 0) + .saturating_add(Weight::from_parts(0, 3539)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `74` + // Estimated: `3539` + // Minimum execution time: 19_184_000 picoseconds. + Weight::from_parts(19_695_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -84,8 +104,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 56_235_000 picoseconds. - Weight::from_parts(58_178_000, 0) + // Minimum execution time: 58_120_000 picoseconds. + Weight::from_parts(59_533_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -120,14 +140,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_226_000 picoseconds. - Weight::from_parts(6_403_000, 0) + // Minimum execution time: 6_074_000 picoseconds. + Weight::from_parts(6_398_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -137,8 +167,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_020_000 picoseconds. - Weight::from_parts(2_100_000, 0) + // Minimum execution time: 2_036_000 picoseconds. 
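// The `send_blob` / `execute_blob` entries being added here benchmark pallet_xcm's new
// blob-taking extrinsics (XCM programs passed as SCALE-encoded bytes). `execute_blob` is
// annotated with the `Benchmark::Override` marker storage, so instead of a measurement
// the generator emits the sentinel 18_446_744_073_709_551_000 (u64::MAX rounded down to
// the nearest thousand picoseconds), signalling that the generic benchmark is overridden,
// presumably because the real cost depends on the XCM program being executed rather than
// on the extrinsic shell.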
+ Weight::from_parts(2_180_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +192,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 24_387_000 picoseconds. - Weight::from_parts(24_814_000, 0) + // Minimum execution time: 25_014_000 picoseconds. + Weight::from_parts(25_374_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -186,8 +216,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 27_039_000 picoseconds. - Weight::from_parts(27_693_000, 0) + // Minimum execution time: 27_616_000 picoseconds. + Weight::from_parts(28_499_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -198,8 +228,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_920_000 picoseconds. - Weight::from_parts(2_082_000, 0) + // Minimum execution time: 2_061_000 picoseconds. + Weight::from_parts(2_153_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -209,8 +239,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 17_141_000 picoseconds. - Weight::from_parts(17_500_000, 0) + // Minimum execution time: 16_592_000 picoseconds. + Weight::from_parts(16_900_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -221,8 +251,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 17_074_000 picoseconds. - Weight::from_parts(17_431_000, 0) + // Minimum execution time: 16_694_000 picoseconds. + Weight::from_parts(16_905_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -233,8 +263,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 19_139_000 picoseconds. - Weight::from_parts(19_474_000, 0) + // Minimum execution time: 17_779_000 picoseconds. + Weight::from_parts(18_490_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -254,8 +284,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 24_346_000 picoseconds. - Weight::from_parts(25_318_000, 0) + // Minimum execution time: 24_526_000 picoseconds. + Weight::from_parts(25_182_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -266,8 +296,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 11_777_000 picoseconds. - Weight::from_parts(12_051_000, 0) + // Minimum execution time: 10_467_000 picoseconds. 
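// A reading aid for the churn in this hunk and the ones below: the `Minimum execution
// time` comment records the fastest of the `REPEAT: 20` runs, while the value actually
// charged is the slightly higher fitted base inside `Weight::from_parts`; small drifts
// in either number between regenerations are therefore measurement noise, not
// behavioural changes.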
+ Weight::from_parts(10_934_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -277,8 +307,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 17_538_000 picoseconds. - Weight::from_parts(17_832_000, 0) + // Minimum execution time: 16_377_000 picoseconds. + Weight::from_parts(17_114_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -299,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `13507` - // Minimum execution time: 33_623_000 picoseconds. - Weight::from_parts(34_186_000, 0) + // Minimum execution time: 32_575_000 picoseconds. + Weight::from_parts(33_483_000, 0) .saturating_add(Weight::from_parts(0, 13507)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -313,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_363_000 picoseconds. - Weight::from_parts(3_511_000, 0) + // Minimum execution time: 3_604_000 picoseconds. + Weight::from_parts(3_744_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -325,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 23_969_000 picoseconds. - Weight::from_parts(24_347_000, 0) + // Minimum execution time: 23_983_000 picoseconds. + Weight::from_parts(24_404_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -337,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 34_071_000 picoseconds. - Weight::from_parts(35_031_000, 0) + // Minimum execution time: 34_446_000 picoseconds. + Weight::from_parts(35_465_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs index 1722e1fcb319..955f2eeba339 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs @@ -257,6 +257,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = SafeCallFilter; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } /// Converts a local signed origin into an XCM location. 
Forms the basis for local origins diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 73c1f7274772..06b2ed1e0522 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -70,6 +70,8 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } + pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -86,6 +88,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 9963fd6c06ed..29bca372f010 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -103,6 +103,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. @@ -137,7 +138,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-westend"), impl_name: create_runtime_str!("coretime-westend"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, @@ -305,6 +306,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs index 8727b9633b1f..13d5fcf3898b 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs @@ -17,25 +17,23 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=coretime-westend-dev -// --wasm-execution=compiled -// --pallet=pallet_broker -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_broker +// --chain=coretime-westend-dev // --header=./cumulus/file_header.txt // --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ @@ -56,8 +54,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_944_000 picoseconds. - Weight::from_parts(2_045_000, 0) + // Minimum execution time: 1_897_000 picoseconds. + Weight::from_parts(2_053_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -67,8 +65,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `10888` // Estimated: `13506` - // Minimum execution time: 21_158_000 picoseconds. - Weight::from_parts(21_572_000, 0) + // Minimum execution time: 22_550_000 picoseconds. + Weight::from_parts(22_871_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -79,8 +77,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12090` // Estimated: `13506` - // Minimum execution time: 20_497_000 picoseconds. - Weight::from_parts(20_995_000, 0) + // Minimum execution time: 21_170_000 picoseconds. + Weight::from_parts(21_645_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -95,8 +93,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `146` // Estimated: `1631` - // Minimum execution time: 10_280_000 picoseconds. - Weight::from_parts(10_686_000, 0) + // Minimum execution time: 10_494_000 picoseconds. + Weight::from_parts(10_942_000, 0) .saturating_add(Weight::from_parts(0, 1631)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -120,15 +118,13 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Workplan` (r:0 w:20) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(n: u32, ) -> Weight { + fn start_sales(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `12247` // Estimated: `13732` - // Minimum execution time: 61_020_000 picoseconds. - Weight::from_parts(63_240_622, 0) + // Minimum execution time: 61_014_000 picoseconds. 
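// The refreshed header (new DATE and HOSTNAME, same CLI version, STEPS and REPEAT) is
// why absolute timings move a few percent throughout this file: the weights were simply
// re-measured on a different reference runner. The `Executed Command` block is the
// recipe for regenerating the file; collapsed to one line, and with the CI-only
// `--json-file` path dropped, it would look roughly like:
//
//   target/production/polkadot-parachain benchmark pallet \
//     --chain=coretime-westend-dev --pallet=pallet_broker --extrinsic='*' \
//     --steps=50 --repeat=20 --wasm-execution=compiled --heap-pages=4096 \
//     --header=./cumulus/file_header.txt \
//     --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/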
+ Weight::from_parts(63_267_651, 0) .saturating_add(Weight::from_parts(0, 13732)) - // Standard Error: 102 - .saturating_add(Weight::from_parts(255, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(26)) } @@ -144,8 +140,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `316` // Estimated: `3593` - // Minimum execution time: 30_627_000 picoseconds. - Weight::from_parts(31_648_000, 0) + // Minimum execution time: 30_931_000 picoseconds. + Weight::from_parts(31_941_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -166,8 +162,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `434` // Estimated: `4698` - // Minimum execution time: 57_701_000 picoseconds. - Weight::from_parts(59_825_000, 0) + // Minimum execution time: 57_466_000 picoseconds. + Weight::from_parts(65_042_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -178,8 +174,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 12_898_000 picoseconds. - Weight::from_parts(13_506_000, 0) + // Minimum execution time: 12_799_000 picoseconds. + Weight::from_parts(13_401_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -190,8 +186,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 14_284_000 picoseconds. - Weight::from_parts(14_791_000, 0) + // Minimum execution time: 14_107_000 picoseconds. + Weight::from_parts(14_630_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -202,8 +198,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 15_570_000 picoseconds. - Weight::from_parts(16_158_000, 0) + // Minimum execution time: 15_254_000 picoseconds. + Weight::from_parts(16_062_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) @@ -220,8 +216,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `735` // Estimated: `4681` - // Minimum execution time: 23_329_000 picoseconds. - Weight::from_parts(24_196_000, 0) + // Minimum execution time: 23_557_000 picoseconds. + Weight::from_parts(24_382_000, 0) .saturating_add(Weight::from_parts(0, 4681)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -240,8 +236,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `801` // Estimated: `5996` - // Minimum execution time: 29_288_000 picoseconds. - Weight::from_parts(30_066_000, 0) + // Minimum execution time: 29_371_000 picoseconds. 
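// Database traffic is deliberately left symbolic (`T::DbWeight::get().reads(r)` /
// `.writes(w)`) rather than folded into the measured constant, so one generated file
// prices storage correctly for whichever backend weights the runtime configures. A
// sketch of what the expansion amounts to, assuming the stock RocksDB constants for
// illustration (the runtime supplies its own `DbWeight`):
use frame_support::weights::{constants::RocksDbWeight, Weight};

fn db_cost(reads: u64, writes: u64) -> Weight {
    // `RocksDbWeight::get()` yields a `RuntimeDbWeight` with per-read/per-write costs.
    RocksDbWeight::get()
        .reads(reads)
        .saturating_add(RocksDbWeight::get().writes(writes))
}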
+ Weight::from_parts(30_200_000, 0) .saturating_add(Weight::from_parts(0, 5996)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -257,11 +253,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `652` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 54_833_000 picoseconds. - Weight::from_parts(55_577_423, 0) + // Minimum execution time: 54_331_000 picoseconds. + Weight::from_parts(55_322_165, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 35_105 - .saturating_add(Weight::from_parts(1_267_911, 0).saturating_mul(m.into())) + // Standard Error: 35_225 + .saturating_add(Weight::from_parts(1_099_614, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -283,8 +279,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `215` // Estimated: `3680` - // Minimum execution time: 55_289_000 picoseconds. - Weight::from_parts(56_552_000, 0) + // Minimum execution time: 53_789_000 picoseconds. + Weight::from_parts(55_439_000, 0) .saturating_add(Weight::from_parts(0, 3680)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -297,8 +293,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `465` // Estimated: `3550` - // Minimum execution time: 39_736_000 picoseconds. - Weight::from_parts(41_346_000, 0) + // Minimum execution time: 43_941_000 picoseconds. + Weight::from_parts(49_776_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -313,8 +309,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3533` - // Minimum execution time: 57_319_000 picoseconds. - Weight::from_parts(60_204_000, 0) + // Minimum execution time: 64_917_000 picoseconds. + Weight::from_parts(70_403_000, 0) .saturating_add(Weight::from_parts(0, 3533)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -331,8 +327,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `857` // Estimated: `3593` - // Minimum execution time: 85_216_000 picoseconds. - Weight::from_parts(91_144_000, 0) + // Minimum execution time: 72_633_000 picoseconds. + Weight::from_parts(79_305_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -345,8 +341,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `556` // Estimated: `4698` - // Minimum execution time: 32_331_000 picoseconds. - Weight::from_parts(39_877_000, 0) + // Minimum execution time: 36_643_000 picoseconds. + Weight::from_parts(48_218_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -362,28 +358,28 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1000]`. 
- fn request_core_count(n: u32, ) -> Weight { + fn request_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 18_128_000 picoseconds. - Weight::from_parts(19_061_234, 0) + // Minimum execution time: 17_617_000 picoseconds. + Weight::from_parts(18_904_788, 0) .saturating_add(Weight::from_parts(0, 3539)) - // Standard Error: 48 - .saturating_add(Weight::from_parts(141, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn process_core_count(_n: u32, ) -> Weight { + fn process_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `266` // Estimated: `1487` - // Minimum execution time: 5_368_000 picoseconds. - Weight::from_parts(5_837_005, 0) + // Minimum execution time: 5_575_000 picoseconds. + Weight::from_parts(5_887_598, 0) .saturating_add(Weight::from_parts(0, 1487)) + // Standard Error: 16 + .saturating_add(Weight::from_parts(41, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -397,8 +393,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `447` // Estimated: `6196` - // Minimum execution time: 36_047_000 picoseconds. - Weight::from_parts(37_101_000, 0) + // Minimum execution time: 36_415_000 picoseconds. + Weight::from_parts(37_588_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -414,13 +410,15 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Workplan` (r:0 w:20) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(_n: u32, ) -> Weight { + fn rotate_sale(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `12194` // Estimated: `13506` - // Minimum execution time: 48_158_000 picoseconds. - Weight::from_parts(49_891_920, 0) + // Minimum execution time: 48_362_000 picoseconds. + Weight::from_parts(49_616_106, 0) .saturating_add(Weight::from_parts(0, 13506)) + // Standard Error: 61 + .saturating_add(Weight::from_parts(59, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(25)) } @@ -432,8 +430,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493` - // Minimum execution time: 5_911_000 picoseconds. - Weight::from_parts(6_173_000, 0) + // Minimum execution time: 6_148_000 picoseconds. + Weight::from_parts(6_374_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -456,8 +454,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1321` // Estimated: `4786` - // Minimum execution time: 30_140_000 picoseconds. - Weight::from_parts(30_912_000, 0) + // Minimum execution time: 30_267_000 picoseconds. 
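// The `n: u32` to `_n: u32` flips above for `request_core_count`, `process_core_count`
// and `rotate_sale` are how the generator records whether the fitted per-`n` slope
// survived this run: when the slope is dropped as insignificant, the parameter is
// underscore-prefixed so the file still compiles without warnings. Where a slope is
// kept, the entry is linear in the component. Evaluating the new `process_core_count`
// entry at the top of its `[0, 1000]` range, for example:
use frame_support::weights::Weight;

fn process_core_count_at(n: u64) -> Weight {
    // base 5_887_598 ps plus 41 ps per unit of `n` (quoted Standard Error: 16)
    Weight::from_parts(5_887_598, 0)
        .saturating_add(Weight::from_parts(41, 0).saturating_mul(n))
}
// process_core_count_at(1_000) => 5_928_598 ps of ref_time.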
+ Weight::from_parts(30_825_000, 0) .saturating_add(Weight::from_parts(0, 4786)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -476,8 +474,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 13_684_000 picoseconds. - Weight::from_parts(14_252_000, 0) + // Minimum execution time: 13_491_000 picoseconds. + Weight::from_parts(13_949_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -488,8 +486,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_718_000 picoseconds. - Weight::from_parts(1_843_000, 0) + // Minimum execution time: 1_711_000 picoseconds. + Weight::from_parts(1_913_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -507,10 +505,22 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `398` // Estimated: `3863` - // Minimum execution time: 11_771_000 picoseconds. - Weight::from_parts(12_120_000, 0) + // Minimum execution time: 12_035_000 picoseconds. + Weight::from_parts(12_383_000, 0) .saturating_add(Weight::from_parts(0, 3863)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(81), added: 576, mode: `MaxEncodedLen`) + fn swap_leases() -> Weight { + // Proof Size summary in bytes: + // Measured: `150` + // Estimated: `1566` + // Minimum execution time: 6_142_000 picoseconds. + Weight::from_parts(6_538_000, 0) + .saturating_add(Weight::from_parts(0, 1566)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs index 0082db3099d0..a1701c5f1c2c 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,8 +62,28 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 18_410_000 picoseconds. - Weight::from_parts(18_657_000, 0) + // Minimum execution time: 17_681_000 picoseconds. 
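// The `swap_leases` entries added in both coretime weight files cover the new
// pallet_broker dispatchable. Their proof annotations differ only because the
// `Broker::Leases` bound differs per runtime (max_size 401 on rococo vs 81 on westend);
// the generator then applies the same fixed overheads in both files: added = max_size
// + 495 and Estimated = added + 990 (896 to 1886 on rococo, 576 to 1566 here).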
+ Weight::from_parts(18_350_000, 0) + .saturating_add(Weight::from_parts(0, 3539)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `74` + // Estimated: `3539` + // Minimum execution time: 18_091_000 picoseconds. + Weight::from_parts(18_327_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -84,8 +104,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 56_616_000 picoseconds. - Weight::from_parts(57_751_000, 0) + // Minimum execution time: 54_943_000 picoseconds. + Weight::from_parts(56_519_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -120,14 +140,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_014_000 picoseconds. - Weight::from_parts(6_412_000, 0) + // Minimum execution time: 5_887_000 picoseconds. + Weight::from_parts(6_101_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -137,8 +167,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_844_000 picoseconds. - Weight::from_parts(1_957_000, 0) + // Minimum execution time: 1_940_000 picoseconds. + Weight::from_parts(2_022_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +192,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 24_067_000 picoseconds. 
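// Since `send_blob` and `execute_blob` are new methods on the `pallet_xcm::WeightInfo`
// trait, every runtime implementing it needs regenerated weights in lockstep, hence the
// identical additions across the coretime-rococo, coretime-westend, people-rococo and
// people-westend files in this diff. An abridged sketch of the trait shape being
// satisfied (the real trait has many more methods):
use frame_support::weights::Weight;

pub trait WeightInfo {
    fn send() -> Weight;
    fn send_blob() -> Weight;
    fn execute_blob() -> Weight;
    // ...remaining pallet_xcm dispatchables...
}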
- Weight::from_parts(24_553_000, 0) + // Minimum execution time: 23_165_000 picoseconds. + Weight::from_parts(23_800_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -186,8 +216,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 27_023_000 picoseconds. - Weight::from_parts(27_620_000, 0) + // Minimum execution time: 26_506_000 picoseconds. + Weight::from_parts(27_180_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -198,8 +228,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_866_000 picoseconds. - Weight::from_parts(1_984_000, 0) + // Minimum execution time: 1_868_000 picoseconds. + Weight::from_parts(2_002_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -209,8 +239,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_425_000 picoseconds. - Weight::from_parts(16_680_000, 0) + // Minimum execution time: 16_138_000 picoseconds. + Weight::from_parts(16_447_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -221,8 +251,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_171_000 picoseconds. - Weight::from_parts(16_564_000, 0) + // Minimum execution time: 16_099_000 picoseconds. + Weight::from_parts(16_592_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -233,8 +263,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 17_785_000 picoseconds. - Weight::from_parts(18_123_000, 0) + // Minimum execution time: 17_972_000 picoseconds. + Weight::from_parts(18_379_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -254,8 +284,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 23_903_000 picoseconds. - Weight::from_parts(24_769_000, 0) + // Minimum execution time: 23_554_000 picoseconds. + Weight::from_parts(24_446_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -266,8 +296,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 10_617_000 picoseconds. - Weight::from_parts(10_843_000, 0) + // Minimum execution time: 10_541_000 picoseconds. + Weight::from_parts(10_894_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -277,8 +307,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_656_000 picoseconds. 
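// On the proof-size summaries: `Measured` is the byte size of the storage proof actually
// recorded during the benchmark run, while `Estimated` is the worst-case bound derived
// from each touched item's `MaxEncodedLen` or `Measured` proof mode listed in the doc
// comments. Only `Estimated` is charged (it is the second argument of
// `Weight::from_parts(0, estimated)`); the measured figure is informational.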
- Weight::from_parts(17_106_000, 0) + // Minimum execution time: 16_404_000 picoseconds. + Weight::from_parts(16_818_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -299,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `13507` - // Minimum execution time: 31_721_000 picoseconds. - Weight::from_parts(32_547_000, 0) + // Minimum execution time: 31_617_000 picoseconds. + Weight::from_parts(32_336_000, 0) .saturating_add(Weight::from_parts(0, 13507)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -313,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_439_000 picoseconds. - Weight::from_parts(3_619_000, 0) + // Minimum execution time: 3_328_000 picoseconds. + Weight::from_parts(3_501_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -325,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_657_000 picoseconds. - Weight::from_parts(24_971_000, 0) + // Minimum execution time: 23_571_000 picoseconds. + Weight::from_parts(24_312_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -337,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 34_028_000 picoseconds. - Weight::from_parts(34_697_000, 0) + // Minimum execution time: 32_879_000 picoseconds. + Weight::from_parts(33_385_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs index b03c7748fe09..fc7d36a8ba18 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs @@ -269,6 +269,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = SafeCallFilter; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } /// Converts a local signed origin into an XCM location. 
Forms the basis for local origins diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml index 888c873a5503..577205cd5543 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index a22d000cf68b..ede9d2184ba7 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -104,7 +104,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton-westend"), impl_name: create_runtime_str!("glutton-westend"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -216,6 +216,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs index ad61987c0e70..15bb519e115c 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs @@ -88,6 +88,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index 635b8dca331e..5352929a9a6d 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -68,6 +68,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = 
"../../../common", default-features = false } @@ -84,6 +85,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "enumflags2/std", "frame-benchmarking?/std", diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index b0fece064c42..2de51424280d 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -98,6 +98,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. @@ -131,7 +132,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-rococo"), impl_name: create_runtime_str!("people-rococo"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, @@ -287,6 +288,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; type WeightInfo = weights::pallet_message_queue::WeightInfo; } @@ -461,7 +463,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_collator_selection, CollatorSelection] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] ); @@ -649,7 +651,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -685,7 +687,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< xcm_config::XcmConfig, diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs index fabce29b5fd9..ac494fdc719f 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,8 +62,28 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 17_830_000 picoseconds. - Weight::from_parts(18_411_000, 0) + // Minimum execution time: 17_935_000 picoseconds. + Weight::from_parts(18_482_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 18_311_000 picoseconds. + Weight::from_parts(18_850_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -84,8 +104,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 55_456_000 picoseconds. - Weight::from_parts(56_808_000, 0) + // Minimum execution time: 56_182_000 picoseconds. + Weight::from_parts(58_136_000, 0) .saturating_add(Weight::from_parts(0, 3535)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -120,14 +140,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_996_000 picoseconds. - Weight::from_parts(6_154_000, 0) + // Minimum execution time: 5_979_000 picoseconds. 
+ Weight::from_parts(6_289_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -137,8 +167,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_768_000 picoseconds. - Weight::from_parts(1_914_000, 0) + // Minimum execution time: 1_853_000 picoseconds. + Weight::from_parts(2_045_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +192,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 24_120_000 picoseconds. - Weight::from_parts(24_745_000, 0) + // Minimum execution time: 23_827_000 picoseconds. + Weight::from_parts(24_493_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -186,8 +216,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_630_000 picoseconds. - Weight::from_parts(27_289_000, 0) + // Minimum execution time: 26_755_000 picoseconds. + Weight::from_parts(27_125_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -198,8 +228,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_821_000 picoseconds. - Weight::from_parts(1_946_000, 0) + // Minimum execution time: 1_898_000 picoseconds. + Weight::from_parts(2_028_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -209,8 +239,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_586_000 picoseconds. - Weight::from_parts(16_977_000, 0) + // Minimum execution time: 16_300_000 picoseconds. + Weight::from_parts(16_995_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -221,8 +251,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_923_000 picoseconds. - Weight::from_parts(17_415_000, 0) + // Minimum execution time: 16_495_000 picoseconds. + Weight::from_parts(16_950_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -233,8 +263,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_596_000 picoseconds. - Weight::from_parts(18_823_000, 0) + // Minimum execution time: 18_153_000 picoseconds. + Weight::from_parts(18_595_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -254,8 +284,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 23_817_000 picoseconds. - Weight::from_parts(24_520_000, 0) + // Minimum execution time: 23_387_000 picoseconds. 
+ Weight::from_parts(24_677_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -266,8 +296,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 11_042_000 picoseconds. - Weight::from_parts(11_578_000, 0) + // Minimum execution time: 10_939_000 picoseconds. + Weight::from_parts(11_210_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -277,8 +307,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 17_306_000 picoseconds. - Weight::from_parts(17_817_000, 0) + // Minimum execution time: 16_850_000 picoseconds. + Weight::from_parts(17_195_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -299,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 32_141_000 picoseconds. - Weight::from_parts(32_954_000, 0) + // Minimum execution time: 31_931_000 picoseconds. + Weight::from_parts(32_494_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -313,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_410_000 picoseconds. - Weight::from_parts(3_556_000, 0) + // Minimum execution time: 3_514_000 picoseconds. + Weight::from_parts(3_709_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -325,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 25_021_000 picoseconds. - Weight::from_parts(25_240_000, 0) + // Minimum execution time: 24_863_000 picoseconds. + Weight::from_parts(25_293_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -337,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 33_801_000 picoseconds. - Weight::from_parts(34_655_000, 0) + // Minimum execution time: 33_799_000 picoseconds. + Weight::from_parts(34_665_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs index 534d65190862..a10333fdb626 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs @@ -269,6 +269,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = SafeCallFilter; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } /// Converts a local signed origin into an XCM location. 
Forms the basis for local origins diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index b5866e729f14..84531f3f1edc 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -68,6 +68,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -84,6 +85,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "enumflags2/std", "frame-benchmarking?/std", diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index dc701099f547..d242bd1c3e54 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -100,6 +100,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. @@ -133,7 +134,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-westend"), impl_name: create_runtime_str!("people-westend"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, @@ -289,6 +290,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; type WeightInfo = weights::pallet_message_queue::WeightInfo; } @@ -351,7 +353,7 @@ parameter_types! { pub const StakingAdminBodyId: BodyId = BodyId::Defense; } -/// We allow Root and the `StakingAdmi` to execute privileged collator selection operations. +/// We allow Root and the `StakingAdmin` to execute privileged collator selection operations. pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< EnsureRoot, EnsureXcm>, @@ -463,7 +465,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_collator_selection, CollatorSelection] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] ); @@ -651,7 +653,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -687,7 +689,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< xcm_config::XcmConfig, diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs index c337289243b7..62a9c802808c 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,8 +62,28 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 17_856_000 picoseconds. - Weight::from_parts(18_473_000, 0) + // Minimum execution time: 17_450_000 picoseconds. + Weight::from_parts(17_913_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 18_082_000 picoseconds. 
+ Weight::from_parts(18_293_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -84,8 +104,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 56_112_000 picoseconds. - Weight::from_parts(57_287_000, 0) + // Minimum execution time: 54_939_000 picoseconds. + Weight::from_parts(55_721_000, 0) .saturating_add(Weight::from_parts(0, 3535)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -120,14 +140,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_186_000 picoseconds. - Weight::from_parts(6_420_000, 0) + // Minimum execution time: 5_789_000 picoseconds. + Weight::from_parts(5_995_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -137,8 +167,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_824_000 picoseconds. - Weight::from_parts(1_999_000, 0) + // Minimum execution time: 1_795_000 picoseconds. + Weight::from_parts(1_924_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +192,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_833_000 picoseconds. - Weight::from_parts(24_636_000, 0) + // Minimum execution time: 23_445_000 picoseconds. + Weight::from_parts(23_906_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -186,8 +216,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_557_000 picoseconds. - Weight::from_parts(27_275_000, 0) + // Minimum execution time: 26_590_000 picoseconds. + Weight::from_parts(27_056_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -198,8 +228,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_921_000 picoseconds. - Weight::from_parts(2_040_000, 0) + // Minimum execution time: 1_889_000 picoseconds. 
+ Weight::from_parts(1_962_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -209,8 +239,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_832_000 picoseconds. - Weight::from_parts(17_312_000, 0) + // Minimum execution time: 16_408_000 picoseconds. + Weight::from_parts(16_877_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -221,8 +251,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_687_000 picoseconds. - Weight::from_parts(17_123_000, 0) + // Minimum execution time: 16_791_000 picoseconds. + Weight::from_parts(17_111_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -233,8 +263,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_164_000 picoseconds. - Weight::from_parts(18_580_000, 0) + // Minimum execution time: 18_355_000 picoseconds. + Weight::from_parts(19_110_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -254,8 +284,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 23_577_000 picoseconds. - Weight::from_parts(24_324_000, 0) + // Minimum execution time: 23_354_000 picoseconds. + Weight::from_parts(23_999_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -266,8 +296,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 11_014_000 picoseconds. - Weight::from_parts(11_223_000, 0) + // Minimum execution time: 11_065_000 picoseconds. + Weight::from_parts(11_302_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -277,8 +307,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_887_000 picoseconds. - Weight::from_parts(17_361_000, 0) + // Minimum execution time: 16_998_000 picoseconds. + Weight::from_parts(17_509_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -299,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 31_705_000 picoseconds. - Weight::from_parts(32_166_000, 0) + // Minimum execution time: 31_068_000 picoseconds. + Weight::from_parts(31_978_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -313,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_568_000 picoseconds. - Weight::from_parts(3_669_000, 0) + // Minimum execution time: 3_478_000 picoseconds. 
+ Weight::from_parts(3_595_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -325,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_823_000 picoseconds. - Weight::from_parts(25_344_000, 0) + // Minimum execution time: 24_962_000 picoseconds. + Weight::from_parts(25_404_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -337,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 34_516_000 picoseconds. - Weight::from_parts(35_478_000, 0) + // Minimum execution time: 32_685_000 picoseconds. + Weight::from_parts(33_592_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs index 6353a5813525..fee2f5684ac3 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs @@ -277,6 +277,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = SafeCallFilter; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } /// Converts a local signed origin into an XCM location. 
Forms the basis for local origins diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml index 46cb3676eb01..b418e52c9f2d 100644 --- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index fc97445363bb..3b59ce1c1e28 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index e7cd5d7a869c..570a7f6a42b8 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -236,6 +236,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs b/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs index f6af50f76d85..df89158729cd 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs @@ -88,6 +88,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = (); + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs index f7cc41c226bd..3fc3822a63eb 100644 --- a/cumulus/parachains/runtimes/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs @@ -425,12 +425,13 @@ impl< } // do teleport - >::teleport_assets( + >::limited_teleport_assets( origin, Box::new(dest.into()), Box::new(beneficiary.into()), Box::new((AssetId(asset), amount).into()), 0, + Unlimited, ) } } diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 28b17360cc6d..f7a4fd186f07 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -21,7 +21,7 @@ substrate-wasm-builder = { 
path = "../../../../../substrate/utils/wasm-builder", codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } smallvec = "1.11.0" # Substrate diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 6ab4cd63f93f..c9ea238ee6e5 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -61,7 +61,6 @@ use parachains_common::{ impls::{AssetsToBlockAuthor, NonZeroIssuance}, message_queue::{NarrowOriginToSibling, ParaIdToSibling}, }; -use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use smallvec::smallvec; use sp_api::impl_runtime_apis; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -89,7 +88,7 @@ use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; // XCM Imports use parachains_common::{AccountId, Signature}; -use xcm::latest::prelude::BodyId; +use xcm::latest::prelude::{AssetId as AssetLocationId, BodyId}; /// Balance of an account. pub type Balance = u128; @@ -545,10 +544,25 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetLocationId = AssetLocationId(xcm_config::RelayLocation::get()); + /// The base fee for the message delivery fees (3 CENTS). + pub const BaseDeliveryFee: u128 = (1_000_000_000_000u128 / 100).saturating_mul(3); +} + +pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< + FeeAssetId, + BaseDeliveryFee, + TransactionByteFee, + XcmpQueue, +>; + impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ChannelInfo = ParachainSystem; @@ -559,7 +573,7 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = (); - type PriceForSiblingDelivery = NoPriceForMessageDelivery; + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } parameter_types! 
{ diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index bc1aa05a6177..7b8e40e04288 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -28,6 +28,7 @@ use super::{ ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, WeightToFee, XcmpQueue, }; +use crate::{BaseDeliveryFee, FeeAssetId, TransactionByteFee}; use core::marker::PhantomData; use frame_support::{ parameter_types, @@ -36,10 +37,10 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use parachains_common::xcm_config::AssetFeeAsExistentialDepositMultiplier; +use parachains_common::{xcm_config::AssetFeeAsExistentialDepositMultiplier, TREASURY_PALLET_ID}; use polkadot_parachain_primitives::primitives::Sibling; -use polkadot_runtime_common::impls::ToAuthor; -use sp_runtime::traits::ConvertInto; +use polkadot_runtime_common::{impls::ToAuthor, xcm_sender::ExponentialPrice}; +use sp_runtime::traits::{AccountIdConversion, ConvertInto}; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, @@ -49,6 +50,7 @@ use xcm_builder::{ SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, StartsWith, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::{traits::JustTry, XcmExecutor}; @@ -59,6 +61,7 @@ parameter_types! { pub const RelayNetwork: Option = None; pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [Parachain(ParachainInfo::parachain_id().into())].into(); + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); } /// Type for specifying how a `Location` can be converted into an `AccountId`. This is used @@ -142,7 +145,7 @@ pub type ForeignFungiblesTransactor = FungiblesAdapter< LocationToAccountId, // Our chain's account ID type (we can't get away without mentioning it explicitly): AccountId, - // We dont need to check teleports here. + // We don't need to check teleports here. NoChecking, // The account to use for tracking teleports. CheckingAccount, @@ -331,13 +334,19 @@ impl xcm_executor::Config for XcmConfig { type MaxAssetsIntoHolding = MaxAssetsIntoHolding; type AssetLocker = (); type AssetExchanger = (); - type FeeManager = (); + type FeeManager = XcmFeeManagerFromComponents< + (), + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } /// Multiplier used for dedicated `TakeFirstAssetTrader` with `ForeignAssets` instance. @@ -352,11 +361,14 @@ pub type ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger = /// No local origins on this chain are allowed to dispatch XCM sends/executions. pub type LocalOriginToLocation = SignedToAccountId32; +pub type PriceForParentDelivery = + ExponentialPrice; + /// The means for routing XCM messages which are not for local execution into the right message /// queues. 
pub type XcmRouter = WithUniqueTopic<( // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, + cumulus_primitives_utility::ParentAsUmp, // ..and XCMP to communicate with the sibling chains. XcmpQueue, )>; diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index d8baeab526cb..fcb5d167eca9 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -55,6 +55,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-ping = { path = "../../../pallets/ping", default-features = false } cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } parachains-common = { path = "../../../common", default-features = false } testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } @@ -74,6 +75,7 @@ std = [ "cumulus-ping/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index fda0f79395a9..4e36683d7f1f 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -111,7 +111,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test-parachain"), impl_name: create_runtime_str!("test-parachain"), authoring_version: 1, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, @@ -324,6 +324,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); } impl cumulus_pallet_aura_ext::Config for Runtime {} @@ -488,6 +489,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } /// Local origins on this chain are allowed to dispatch XCM sends/executions. 
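The penpal hunks above swap `NoPriceForMessageDelivery` for an `ExponentialPrice`-based delivery price on both the UMP route (`PriceForParentDelivery`) and the XCMP route (`PriceForSiblingParachainDelivery`). As a rough sketch of the pricing arithmetic this family of types is generally assumed to implement (a congestion-driven fee factor multiplied into a base fee plus a per-byte fee); the function names and the parts-per-million fixed-point encoding below are illustrative, not the library's API:

```rust
// Illustrative only: fee = factor * (base_fee + msg_len * byte_fee),
// with the factor expressed as parts-per-million (1_000_000 == 1.0).
fn delivery_fee(fee_factor_ppm: u128, base_fee: u128, byte_fee: u128, msg_len: u128) -> u128 {
    let linear = base_fee.saturating_add(msg_len.saturating_mul(byte_fee));
    fee_factor_ppm.saturating_mul(linear) / 1_000_000
}

fn main() {
    // 3 CENTS base fee, as configured in the hunk above: (1_000_000_000_000 / 100) * 3.
    let base: u128 = (1_000_000_000_000u128 / 100).saturating_mul(3);
    // Uncongested channel: factor 1.0; a 100-byte message at 1_000 per byte.
    assert_eq!(delivery_fee(1_000_000, base, 1_000, 100), base + 100_000);
    // Congested channel: factor 1.5 charges 50% more for the same message.
    assert_eq!(delivery_fee(1_500_000, base, 1_000, 100), (base + 100_000) / 2 * 3);
}
```

Under a scheme of this shape, an idle channel charges the flat linear fee, while the factor, and with it the delivery price, grows as the channel backs up, discouraging further traffic.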
@@ -654,6 +658,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 4255b84b41d1..e3674b80b8ef 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -15,7 +15,7 @@ name = "polkadot-parachain" path = "src/main.rs" [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" clap = { version = "4.5.3", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.28" @@ -42,7 +42,10 @@ jsonrpsee = { version = "0.22", features = ["server"] } people-rococo-runtime = { path = "../parachains/runtimes/people/people-rococo" } people-westend-runtime = { path = "../parachains/runtimes/people/people-westend" } parachains-common = { path = "../parachains/common" } -testnet-parachains-constants = { path = "../parachains/runtimes/constants", default-features = false, features = ["rococo", "westend"] } +testnet-parachains-constants = { path = "../parachains/runtimes/constants", default-features = false, features = [ + "rococo", + "westend", +] } # Substrate frame-benchmarking = { path = "../../substrate/frame/benchmarking" } @@ -168,3 +171,4 @@ try-runtime = [ "sp-runtime/try-runtime", ] fast-runtime = ["bridge-hub-rococo-runtime/fast-runtime"] +elastic-scaling-experimental = ["polkadot-service/elastic-scaling-experimental"] diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 9ba7b7876b3e..ac9c6b6f978a 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -135,7 +135,7 @@ fn runtime(id: &str) -> Runtime { fn load_spec(id: &str) -> std::result::Result, String> { let (id, _, para_id) = extract_parachain_id(id); Ok(match id { - // - Defaul-like + // - Default-like "staging" => Box::new(chain_spec::rococo_parachain::staging_rococo_parachain_local_config()), "tick" => Box::new(GenericChainSpec::from_json_bytes( diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index ddf595ca70c1..e9bb5947522b 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -69,13 +69,11 @@ use substrate_prometheus_endpoint::Registry; use polkadot_primitives::CollatorPair; #[cfg(not(feature = "runtime-benchmarks"))] -type HostFunctions = - (sp_io::SubstrateHostFunctions, cumulus_client_service::storage_proof_size::HostFunctions); +type HostFunctions = cumulus_client_service::ParachainHostFunctions; #[cfg(feature = "runtime-benchmarks")] type HostFunctions = ( - sp_io::SubstrateHostFunctions, - cumulus_client_service::storage_proof_size::HostFunctions, + cumulus_client_service::ParachainHostFunctions, frame_benchmarking::benchmarking::HostFunctions, ); diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 5ce3c9b6b7cf..f5c2edd9934b 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version 
= "2.11.1", default-features = false, features = ["derive"] } # Substrate sp-api = { path = "../../../substrate/primitives/api", default-features = false } diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index 5e3aa736d110..bec5e9267e54 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -10,9 +10,9 @@ license = "Apache-2.0" workspace = true [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate sp-core = { path = "../../../substrate/primitives/core", default-features = false } diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index 16fd126c2a38..0ee239cf6ef0 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -12,16 +12,16 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } -cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction", default-features = false } -docify = "0.2.7" +cumulus-primitives-core = { path = "../core", default-features = false } +cumulus-primitives-proof-size-hostfunction = { path = "../proof-size-hostfunction", default-features = false } +docify = "0.2.8" [dev-dependencies] sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs index 890d2b94b8f0..5c0d1b990513 100644 --- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -421,7 +421,7 @@ mod tests { .unwrap(); assert_eq!(pre, Some(100)); - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); assert_ok!(StorageWeightReclaim::::post_dispatch( @@ -456,7 +456,7 @@ mod tests { .unwrap(); assert_eq!(pre, Some(100)); - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. 
assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); assert_ok!(StorageWeightReclaim::::post_dispatch( @@ -500,7 +500,7 @@ mod tests { &Ok(()) )); // `CheckWeight` gets called after `StorageWeightReclaim` this time. - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); @@ -536,7 +536,7 @@ mod tests { &Ok(()) )); // `CheckWeight` gets called after `StorageWeightReclaim` this time. - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); diff --git a/cumulus/primitives/timestamp/src/lib.rs b/cumulus/primitives/timestamp/src/lib.rs index b95b6f422f19..5365f83efdf1 100644 --- a/cumulus/primitives/timestamp/src/lib.rs +++ b/cumulus/primitives/timestamp/src/lib.rs @@ -22,7 +22,7 @@ //! access to any clock from the runtime the timestamp is always passed as an inherent into the //! runtime. To check this inherent when validating the block, we will use the relay chain slot. As //! the relay chain slot is derived from a timestamp, we can easily convert it back to a timestamp -//! by muliplying it with the slot duration. By comparing the relay chain slot derived timestamp +//! by multiplying it with the slot duration. By comparing the relay chain slot derived timestamp //! with the timestamp we can ensure that the parachain timestamp is reasonable. 
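The timestamp comment above describes deriving a timestamp from the relay chain slot by multiplying it with the slot duration, then comparing it against the parachain's timestamp inherent. A minimal sketch of that check, assuming a 6-second relay slot duration and millisecond timestamps; the constant, tolerance, and names are illustrative:

```rust
// Illustrative sketch; the constant and tolerance are assumptions, not the crate's values.
const RELAY_CHAIN_SLOT_DURATION_MILLIS: u64 = 6_000;

/// Convert a relay chain slot back into the timestamp (in ms) at which the slot began.
fn relay_slot_to_timestamp(relay_slot: u64) -> u64 {
    relay_slot * RELAY_CHAIN_SLOT_DURATION_MILLIS
}

fn main() {
    let relay_slot = 1_000u64;
    let parachain_timestamp = 6_000_500u64; // as supplied via the timestamp inherent
    let derived = relay_slot_to_timestamp(relay_slot);
    // Accept the inherent only if it stays within one slot of the slot-derived time.
    assert!(parachain_timestamp.abs_diff(derived) <= RELAY_CHAIN_SLOT_DURATION_MILLIS);
}
```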
#![cfg_attr(not(feature = "std"), no_std)] diff --git a/cumulus/primitives/utility/src/lib.rs b/cumulus/primitives/utility/src/lib.rs index c8d59bcb3e24..826dc8b01d06 100644 --- a/cumulus/primitives/utility/src/lib.rs +++ b/cumulus/primitives/utility/src/lib.rs @@ -145,7 +145,7 @@ impl< ) -> Result { log::trace!(target: "xcm::weight", "TakeFirstAssetTrader::buy_weight weight: {:?}, payment: {:?}, context: {:?}", weight, payment, context); - // Make sure we dont enter twice + // Make sure we don't enter twice if self.0.is_some() { return Err(XcmError::NotWithdrawable) } @@ -180,7 +180,7 @@ impl< // Convert to the same kind of asset, with the required fungible balance let required = first.id.clone().into_asset(asset_balance.into()); - // Substract payment + // Subtract payment let unused = payment.checked_sub(required.clone()).map_err(|_| XcmError::TooExpensive)?; // record weight and asset @@ -207,7 +207,7 @@ impl< // Calculate asset_balance // This read should have already been cached in buy_weight - let (asset_balance, outstanding_minus_substracted) = + let (asset_balance, outstanding_minus_subtracted) = FeeCharger::charge_weight_in_fungibles(local_asset_id, weight).ok().map( |asset_balance| { // Require at least a drop of minimum_balance @@ -225,16 +225,15 @@ impl< )?; // Convert balances into u128 - let outstanding_minus_substracted: u128 = - outstanding_minus_substracted.saturated_into(); + let outstanding_minus_subtracted: u128 = outstanding_minus_subtracted.saturated_into(); let asset_balance: u128 = asset_balance.saturated_into(); - // Construct outstanding_concrete_asset with the same location id and substracted + // Construct outstanding_concrete_asset with the same location id and subtracted // balance let outstanding_concrete_asset: Asset = - (id.clone(), outstanding_minus_substracted).into(); + (id.clone(), outstanding_minus_subtracted).into(); - // Substract from existing weight and balance + // Subtract from existing weight and balance weight_outstanding = weight_outstanding.saturating_sub(weight); // Override AssetTraderRefunder @@ -267,9 +266,10 @@ impl< } } -/// XCM fee depositor to which we implement the TakeRevenue trait -/// It receives a Transact implemented argument, a 32 byte convertible acocuntId, and the fee -/// receiver account FungiblesMutateAdapter should be identical to that implemented by WithdrawAsset +/// XCM fee depositor to which we implement the `TakeRevenue` trait. +/// It receives a `Transact` implemented argument and a 32 byte convertible `AccountId`, and the fee +/// receiver account's `FungiblesMutateAdapter` should be identical to that implemented by +/// `WithdrawAsset`. pub struct XcmFeesTo32ByteAccount( PhantomData<(FungiblesMutateAdapter, AccountId, ReceiverAccount)>, ); @@ -767,7 +767,8 @@ mod test_trader { /// Implementation of `xcm_builder::EnsureDelivery` which helps to ensure delivery to the /// parent relay chain. Deposits existential deposit for origin (if needed). /// Deposits estimated fee to the origin account (if needed). -/// Allows to trigger additional logic for specific `ParaId` (e.g. open HRMP channel) (if neeeded). +/// Allows triggering of additional logic for a specific `ParaId` (e.g. to open an HRMP channel) if +/// needed.
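For the `XcmFeesTo32ByteAccount` doc comment reworded above, the underlying idea is that a `TakeRevenue` implementor receives each fee asset and decides where to deposit it. A self-contained sketch: the trait below only mirrors the one-method shape of the real `TakeRevenue`, and the asset and receiver types are simplified stand-ins:

```rust
// Self-contained sketch; `Asset` and the receiver account are simplified stand-ins.
struct Asset {
    amount: u128,
}

type AccountId = [u8; 32];

trait TakeRevenue {
    fn take_revenue(revenue: Asset);
}

/// Deposit every received fee asset into one fixed receiver account.
struct FeesToFixedAccount;

impl TakeRevenue for FeesToFixedAccount {
    fn take_revenue(revenue: Asset) {
        const RECEIVER: AccountId = [0u8; 32]; // hypothetical fee-receiver account
        // A real runtime would mint or deposit via a `fungibles` adapter here;
        // this sketch only records the intent.
        println!("deposit {} units into {:?}", revenue.amount, RECEIVER);
    }
}

fn main() {
    FeesToFixedAccount::take_revenue(Asset { amount: 42 });
}
```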
#[cfg(feature = "runtime-benchmarks")] pub struct ToParentDeliveryHelper( core::marker::PhantomData<(XcmConfig, ExistentialDeposit, PriceForDelivery)>, diff --git a/cumulus/scripts/scale_encode_genesis/index.js b/cumulus/scripts/scale_encode_genesis/index.js index f612e6da79dd..c6600e406361 100644 --- a/cumulus/scripts/scale_encode_genesis/index.js +++ b/cumulus/scripts/scale_encode_genesis/index.js @@ -19,14 +19,14 @@ async function connect(endpoint, types = {}) { } if (!process.argv[2] || !process.argv[3]) { - console.log("usage: node generate_keys [rpc enpoint]"); + console.log("usage: node generate_keys [rpc endpoint]"); exit(); } const input = process.argv[2]; const output = process.argv[3]; // default to localhost and the default Substrate port -const rpcEnpoint = process.argv[4] || "ws://localhost:9944"; +const rpcEndpoint = process.argv[4] || "ws://localhost:9944"; console.log("Processing", input, output); fs.readFile(input, "utf8", (err, data) => { @@ -38,8 +38,8 @@ fs.readFile(input, "utf8", (err, data) => { const genesis = JSON.parse(data); console.log("loaded genesis, length = ", genesis.length); - console.log(`Connecting to RPC endpoint: ${rpcEnpoint}`); - connect(rpcEnpoint) + console.log(`Connecting to RPC endpoint: ${rpcEndpoint}`); + connect(rpcEndpoint) .then((api) => { console.log('Connected'); const setStorage = api.tx.system.setStorage(genesis); diff --git a/cumulus/scripts/temp_parachain_types.json b/cumulus/scripts/temp_parachain_types.json index f550a6774450..2509d32be9fd 100644 --- a/cumulus/scripts/temp_parachain_types.json +++ b/cumulus/scripts/temp_parachain_types.json @@ -54,7 +54,7 @@ "validity_votes": "Vec", "validator_indices": "BitVec" }, - "CandidatePendingAvailablility": { + "CandidatePendingAvailability": { "core": "u32", "descriptor": "CandidateDescriptor", "availability_votes": "BitVec", diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 1ce5c3b3236b..031486cd75ed 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -10,7 +10,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-executive = { path = "../../../substrate/frame/executive", default-features = false } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index c3a84e16d7e9..b83445b6b8fb 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -143,7 +143,7 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } -/// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. +/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. /// This is used to limit the maximal weight of a single extrinsic. 
const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 45e21432f5b8..040fb479f6e8 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -13,7 +13,7 @@ name = "test-parachain" path = "src/main.rs" [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" clap = { version = "4.5.3", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } criterion = { version = "0.5.1", features = ["async_tokio"] } @@ -88,7 +88,6 @@ pallet-timestamp = { path = "../../../substrate/frame/timestamp" } futures = "0.3.28" portpicker = "0.1.1" rococo-parachain-runtime = { path = "../../parachains/runtimes/testing/rococo-parachain" } -pallet-im-online = { path = "../../../substrate/frame/im-online" } sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } cumulus-test-client = { path = "../client" } @@ -106,7 +105,6 @@ runtime-benchmarks = [ "cumulus-primitives-core/runtime-benchmarks", "cumulus-test-client/runtime-benchmarks", "frame-system/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index a614863803e0..d95f733969bc 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -47,7 +47,7 @@ fn create_extrinsics( src_accounts: &[sr25519::Pair], dst_accounts: &[sr25519::Pair], ) -> (usize, Vec) { - // Add as many tranfer extrinsics as possible into a single block. + // Add as many transfer extrinsics as possible into a single block. let mut block_builder = BlockBuilderBuilder::new(client) .on_parent_block(client.chain_info().best_hash) .with_parent_block_number(client.chain_info().best_number) diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 3554a383f219..3af3901d175e 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -735,7 +735,7 @@ pub fn node_config( tokio_handle: tokio::runtime::Handle, key: Sr25519Keyring, nodes: Vec, - nodes_exlusive: bool, + nodes_exclusive: bool, para_id: ParaId, is_collator: bool, endowed_accounts: Vec, @@ -759,7 +759,7 @@ pub fn node_config( None, ); - if nodes_exlusive { + if nodes_exclusive { network_config.default_peers_set.reserved_nodes = nodes; network_config.default_peers_set.non_reserved_mode = sc_network::config::NonReservedPeerMode::Deny; diff --git a/docker/dockerfiles/binary_injected.Dockerfile b/docker/dockerfiles/binary_injected.Dockerfile index ac1fd5317c67..c8930bd83f02 100644 --- a/docker/dockerfiles/binary_injected.Dockerfile +++ b/docker/dockerfiles/binary_injected.Dockerfile @@ -2,7 +2,7 @@ FROM docker.io/parity/base-bin # This file allows building a Generic container image # based on one or multiple pre-built Linux binaries. -# Some defaults are set to polkadot but all can be overriden. +# Some defaults are set to polkadot but all can be overridden. 
SHELL ["/bin/bash", "-c"] diff --git a/docker/scripts/build-injected.sh b/docker/scripts/build-injected.sh index f415cf43c0ee..749d0fa335cc 100755 --- a/docker/scripts/build-injected.sh +++ b/docker/scripts/build-injected.sh @@ -20,7 +20,7 @@ PROJECT_ROOT=${PROJECT_ROOT:-$(git rev-parse --show-toplevel)} DOCKERFILE=${DOCKERFILE:-docker/dockerfiles/binary_injected.Dockerfile} VERSION_TOML=$(grep "^version " $PROJECT_ROOT/Cargo.toml | grep -oE "([0-9\.]+-?[0-9]+)") -#n The following VAR have default that can be overriden +#n The following VAR have default that can be overridden DOCKER_OWNER=${DOCKER_OWNER:-parity} # We may get 1..n binaries, comma separated diff --git a/docs/contributor/container.md b/docs/contributor/container.md index dd44b31bfe96..9c542f411c81 100644 --- a/docs/contributor/container.md +++ b/docs/contributor/container.md @@ -16,7 +16,7 @@ Parity builds and publishes a container image that can be found as `docker.io/pa ## Parity CI image Parity maintains and uses internally a generic "CI" image that can be used as a base to build binaries: [Parity CI -container image](https://github.com/paritytech/scripts/tree/master/dockerfiles/ci-linux): +container image](https://github.com/paritytech/scripts/tree/master/dockerfiles/ci-unified): The command below allows building a Linux binary without having to even install Rust or any dependency locally: @@ -24,14 +24,11 @@ The command below allows building a Linux binary without having to even install docker run --rm -it \ -w /polkadot-sdk \ -v $(pwd):/polkadot-sdk \ - paritytech/ci-linux:production \ + paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240222 \ cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain sudo chown -R $(id -u):$(id -g) target/ ``` -If you want to reproduce other steps of CI process you can use the following -[guide](https://github.com/paritytech/scripts#gitlab-ci-for-building-docker-images). - ## Injected image Injecting a binary inside a base image is the quickest option to get a working container image. This only works if you diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 3f40a950c286..426c5d9de4a0 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-sdk-docs" -description = "The one stop shop for developers of the polakdot-sdk" +description = "The one stop shop for developers of the polkadot-sdk" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "paritytech.github.io" repository.workspace = true @@ -17,7 +17,7 @@ workspace = true # Needed for all FRAME-based code parity-scale-codec = { version = "3.0.0", default-features = false } scale-info = { version = "2.6.0", default-features = false } -frame = { path = "../../substrate/frame", features = [ +frame = { package = "polkadot-sdk-frame", path = "../../substrate/frame", features = [ "experimental", "runtime", ] } @@ -27,7 +27,7 @@ pallet-example-offchain-worker = { path = "../../substrate/frame/examples/offcha # How we build docs in rust-docs simple-mermaid = "0.1.1" -docify = "0.2.7" +docify = "0.2.8" # Polkadot SDK deps, typically all should only be in scope such that we can link to their doc item. 
node-cli = { package = "staging-node-cli", path = "../../substrate/bin/node/cli" } @@ -66,6 +66,7 @@ pallet-aura = { path = "../../substrate/frame/aura", default-features = false } pallet-timestamp = { path = "../../substrate/frame/timestamp" } pallet-balances = { path = "../../substrate/frame/balances" } pallet-assets = { path = "../../substrate/frame/assets" } +pallet-preimage = { path = "../../substrate/frame/preimage" } pallet-transaction-payment = { path = "../../substrate/frame/transaction-payment" } pallet-utility = { path = "../../substrate/frame/utility" } pallet-multisig = { path = "../../substrate/frame/multisig" } @@ -73,6 +74,8 @@ pallet-proxy = { path = "../../substrate/frame/proxy" } pallet-authorship = { path = "../../substrate/frame/authorship" } pallet-collective = { path = "../../substrate/frame/collective" } pallet-democracy = { path = "../../substrate/frame/democracy" } +pallet-uniques = { path = "../../substrate/frame/uniques" } +pallet-nfts = { path = "../../substrate/frame/nfts" } pallet-scheduler = { path = "../../substrate/frame/scheduler" } # Primitives @@ -81,6 +84,13 @@ sp-api = { path = "../../substrate/primitives/api" } sp-core = { path = "../../substrate/primitives/core" } sp-keyring = { path = "../../substrate/primitives/keyring" } sp-runtime = { path = "../../substrate/primitives/runtime" } +sp-arithmetic = { path = "../../substrate/primitives/arithmetic" } + +# Misc pallet dependencies +pallet-referenda = { path = "../../substrate/frame/referenda" } +pallet-broker = { path = "../../substrate/frame/broker" } +pallet-babe = { path = "../../substrate/frame/babe" } + sp-offchain = { path = "../../substrate/primitives/offchain" } sp-version = { path = "../../substrate/primitives/version" } diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs index 06eab0cfadef..de9168489552 100644 --- a/docs/sdk/src/guides/your_first_pallet/mod.rs +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -105,8 +105,8 @@ //! This macro will call `.into()` under the hood. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_better)] //! -//! Moreover, you will learn in the [Safe Defensive Programming -//! section](crate::reference_docs::safe_defensive_programming) that it is always recommended to use +//! Moreover, you will learn in the [Defensive Programming +//! section](crate::reference_docs::defensive_programming) that it is always recommended to use //! safe arithmetic operations in your runtime. By using [`frame::traits::CheckedSub`], we can not //! only take a step in that direction, but also improve the error handling and make it slightly more //! ergonomic. @@ -294,7 +294,7 @@ //! The following topics were used in this guide, but not covered in depth. It is suggested to //! study them subsequently: //! -//! - [`crate::reference_docs::safe_defensive_programming`]. +//! - [`crate::reference_docs::defensive_programming`]. //! - [`crate::reference_docs::frame_origin`]. //! - [`crate::reference_docs::frame_runtime_types`]. //! - The pallet we wrote in this guide was using `dev_mode`, learn more in diff --git a/docs/sdk/src/polkadot_sdk/substrate.rs b/docs/sdk/src/polkadot_sdk/substrate.rs index 5021c55e581f..69d74d86db1b 100644 --- a/docs/sdk/src/polkadot_sdk/substrate.rs +++ b/docs/sdk/src/polkadot_sdk/substrate.rs @@ -99,7 +99,7 @@ //! demonstration. //! * [`chain_spec_builder`]: Utility to build more detailed chain-specs for the aforementioned //! node.
Other projects typically contain a `build-spec` subcommand that does the same. -//! * [`node_template`](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin/node-template): +//! * [`node_template`](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin/node): //! a template node that contains a minimal set of features and can act as a starting point of a //! project. //! * [`subkey`]: Substrate's key management utility. diff --git a/docs/sdk/src/polkadot_sdk/templates.rs b/docs/sdk/src/polkadot_sdk/templates.rs index f60c75b8f219..4bf0e839c798 100644 --- a/docs/sdk/src/polkadot_sdk/templates.rs +++ b/docs/sdk/src/polkadot_sdk/templates.rs @@ -22,9 +22,8 @@ //! - [`frontier-parachain-template`](https://github.com/paritytech/frontier-parachain-template): A //! parachain template for launching EVM-compatible parachains. //! -//! [`substrate-node-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/substrate/bin/node-template/ -//! [`substrate-minimal-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/substrate/bin/minimal/ -//! [`cumulus-parachain-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/cumulus/parachain-template/ +//! [`minimal-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/templates/minimal/ +//! [`parachain-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/templates/parachain/ // TODO: in general, we need to make a deliberate choice here of moving a few key templates to this // repo (nothing stays in `substrate-developer-hub`) and everything else should be community diff --git a/docs/sdk/src/reference_docs/defensive_programming.rs b/docs/sdk/src/reference_docs/defensive_programming.rs new file mode 100644 index 000000000000..9828e1b50918 --- /dev/null +++ b/docs/sdk/src/reference_docs/defensive_programming.rs @@ -0,0 +1,395 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! [Defensive programming](https://en.wikipedia.org/wiki/Defensive_programming) is a design paradigm that enables a program to continue +//! running despite unexpected behavior, input, or events that may arise at runtime. +//! Usually, unforeseen circumstances may cause the program to stop or, in the Rust context, +//! panic!. Defensive practices allow for these circumstances to be accounted for ahead of time +//! and for them to be handled gracefully, which is in line with the intended fault-tolerant and +//! deterministic nature of blockchains. +//! +//! The Polkadot SDK is built to reflect these principles and to facilitate their usage accordingly. +//! +//! ## General Overview +//! +//! When developing within the context of the Substrate runtime, there is one golden rule: +//! +//! ***DO NOT PANIC***. There are some exceptions, but generally, this is the default precedent. +//! +//! > It’s important to differentiate between the runtime and node. The runtime refers to the core
+//! > business logic of a Substrate-based chain, whereas the node refers to the outer client, which
+//! > deals with telemetry and gossip from other nodes. For more information, read about
+//! > [Substrate's node
+//! > architecture](crate::reference_docs::wasm_meta_protocol#node-vs-runtime). It’s also important
+//! > to note that the criticality of the node is slightly lower
+//! > than that of the runtime, which is why you may see `unwrap()` or other “non-defensive”
+//! > approaches
+//! > in a few places of the node's code repository.
+//!
+//! Most of these practices fall within Rust's
+//! colloquial usage of proper error propagation, handling, and arithmetic-based edge cases.
+//!
+//! General guidelines:
+//!
+//! - **Avoid writing functions that could explicitly panic,** such as directly using `unwrap()` on
+//! a [`Result`], or accessing an out-of-bounds index on a collection. Prefer the safer methods for
+//! accessing collection types, i.e., `get()`, which allows defensive handling of the resulting
+//! [`Option`].
+//! - **It may be acceptable to use `expect()`,** but only if one is completely certain (and has
+//! performed a check beforehand) that a value won't panic upon unwrapping. *Even this is
+//! discouraged*, however, as future changes to that function could then cause that statement to
+//! panic. It is important to ensure all possible errors are propagated and handled effectively.
+//! - **If a function *can* panic,** it usually is prefaced with `unchecked_` to indicate its
+//! unsafety.
+//! - **If you are writing a function that could panic,** [document it!](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html#documenting-components)
+//! - **Carefully handle mathematical operations.** Many seemingly simplistic operations, such as
+//! **arithmetic** in the runtime, could present a number of issues [(see more later in this
+//! document)](#integer-overflow). Use checked arithmetic wherever possible.
+//!
+//! These guidelines could be summarized in the following example, where `bad_pop` is prone to
+//! panicking, and `good_pop` allows for proper error handling to take place:
+//!
+//!```ignore
+//! // Bad pop always requires that we return something, even if vector/array is empty.
+//! fn bad_pop<T>(v: Vec<T>) -> T {}
+//! // Good pop allows us to return None from the Option if need be.
+//! fn good_pop<T>(v: Vec<T>) -> Option<T> {}
+//! ```
+//!
+//! ### Defensive Traits
+//!
+//! The [`Defensive`](frame::traits::Defensive) trait provides a number of functions, all of which
+//! provide an alternative to 'vanilla' Rust functions, e.g.,:
+//!
+//! - [`defensive_unwrap_or()`](frame::traits::Defensive::defensive_unwrap_or) instead of
+//! `unwrap_or()`
+//! - [`defensive_ok_or()`](frame::traits::DefensiveOption::defensive_ok_or) instead of `ok_or()`
+//!
+//! Defensive methods use [`debug_assertions`](https://doc.rust-lang.org/reference/conditional-compilation.html#debug_assertions), which panic in development, but in
+//! production/release, they will merely log an error (i.e., `log::error`).
+//!
+//! The [`Defensive`](frame::traits::Defensive) trait and its various implementations can be found
+//! [here](frame::traits::Defensive).
+//!
+//! ## Integer Overflow
+//!
+//! The Rust compiler catches statically detectable overflow at compile time.
+//! The compiler panics in **debug** mode in the event of an integer overflow. In
+//! **release** mode, it resorts to silently _wrapping_ the overflowed amount in a modular fashion
+//! (from the `MAX` back to zero).
+//!
+//! In runtime development, we don't always have control over what is being supplied
+//! as a parameter. For example, even this simple add function could present one of two outcomes
+//! depending on whether it is in **release** or **debug** mode:
+//!
+//! ```ignore
+//! fn naive_add(x: u8, y: u8) -> u8 {
+//!     x + y
+//! }
+//! ```
+//! If we passed overflow-able values at runtime, this could panic (or wrap if in release).
+//!
+//! ```ignore
+//! naive_add(250u8, 10u8); // In debug mode, this would panic. In release, this would return 4.
+//! ```
+//!
+//! It is the silent portion of this behavior that presents a real issue. Such behavior should be
+//! made obvious, especially in blockchain development, where unsafe arithmetic could produce
+//! unexpected consequences like a user balance over or underflowing.
+//!
+//! Fortunately, there are ways to both represent and handle these scenarios, depending on our
+//! specific use case, natively built into Rust as well as libraries like [`sp_arithmetic`].
+//!
+//! ## Infallible Arithmetic
+//!
+//! Both Rust and Substrate provide safe ways to deal with numbers and alternatives to floating
+//! point arithmetic.
+//!
+//! Known fallible scenarios should be avoided: i.e., the possibility of dividing or taking the
+//! modulo by zero at any point should be ruled out. In most cases, one should opt for a
+//! `checked_*` method to introduce safe arithmetic into their code.
+//!
+//! A developer should use fixed-point instead of floating-point arithmetic to mitigate the
+//! potential for inaccuracy, rounding errors, or other unexpected behavior.
+//!
+//! - [Fixed point types](sp_arithmetic::fixed_point) and their associated usage can be found here.
+//! - [PerThing](sp_arithmetic::per_things) and its associated types can be found here.
+//!
+//! Using floating point number types (i.e., `f32`, `f64`) in the runtime should be avoided, as a
+//! single non-deterministic result could cause chaos for blockchain consensus along with the
+//! issues above. For more on the peculiarities of floating point calculations, [watch this video
+//! by the Computerphile](https://www.youtube.com/watch?v=PZRI1IfStY0).
+//!
+//! The following methods demonstrate different ways to handle numbers natively in Rust safely,
+//! without fear of panic or unexpected behavior from wrapping.
+//!
+//! ### Checked Arithmetic
+//!
+//! **Checked operations** utilize an `Option` as a return type. This allows for
+//! catching any unexpected behavior in the event of an overflow through simple pattern matching.
+//!
+//! This is an example of a valid operation:
+#![doc = docify::embed!("./src/reference_docs/defensive_programming.rs", checked_add_example)]
+//!
+//! This is an example of an invalid operation. In this case, a simulated integer overflow, which
+//! would simply result in `None`:
+#![doc = docify::embed!(
+	"./src/reference_docs/defensive_programming.rs",
+	checked_add_handle_error_example
+)]
+//!
+//! Suppose you aren’t sure which operation to use for runtime math. In that case, checked
+//! operations are the safest bet, presenting two predictable (and erroring) outcomes that can be
+//! handled accordingly (`Some` and `None`).
+//!
+//! The following conventions can be seen within the Polkadot SDK, where it is
+//! handled in two ways:
+//!
+//! - As an [`Option`], using the `if let` / `if` or `match`
+//! - As a [`Result`], via `ok_or` (or similar conversion to [`Result`] from [`Option`])
+//!
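+//! As a quick aside, the division- and modulo-by-zero cases mentioned earlier are covered by the
+//! same `checked_*` family; for instance:
+//!
+//! ```ignore
+//! let dividend: u32 = 10;
+//! // Division (or modulo) by zero yields `None` instead of panicking.
+//! assert_eq!(dividend.checked_div(0), None);
+//! assert_eq!(dividend.checked_rem(0), None);
+//! ```
+//!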
+//! #### Handling via Option - More Verbose
+//!
+//! Because checked operations return an `Option`, you can use a more verbose/explicit form of
+//! error handling via `if` or `if let`:
+#![doc = docify::embed!("./src/reference_docs/defensive_programming.rs", increase_balance)]
+//!
+//! Alternatively, `match` may also be used directly in a more concise manner:
+#![doc = docify::embed!("./src/reference_docs/defensive_programming.rs", increase_balance_match)]
+//!
+//! This is generally a useful convention for handling checked types and most types that return
+//! `Option`.
+//!
+//! #### Handling via Result - Less Verbose
+//!
+//! In the Polkadot SDK codebase, checked operations are handled as a `Result` via `ok_or`. This is
+//! a less verbose way of expressing the above. This usage often boils down to the developer’s
+//! preference:
+#![doc = docify::embed!("./src/reference_docs/defensive_programming.rs", increase_balance_result)]
+//!
+//! ### Saturating Operations
+//!
+//! Saturating a number limits it to the type’s upper or lower bound, even if the operation would
+//! overflow at runtime. For example, adding to `u32::MAX` would simply limit itself to
+//! `u32::MAX`:
+#![doc = docify::embed!("./src/reference_docs/defensive_programming.rs", saturated_add_example)]
+//!
+//! Saturating calculations can be used if one is very sure that something won't overflow, but
+//! still wants to rule out any potential panic or wrapping behavior.
+//!
+//! There is also a series of defensive alternatives via
+//! [`DefensiveSaturating`](frame::traits::DefensiveSaturating), which introduces the same behavior
+//! of the [`Defensive`](frame::traits::Defensive) trait, only with saturating mathematical
+//! operations:
+#![doc = docify::embed!(
+	"./src/reference_docs/defensive_programming.rs",
+	saturated_defensive_example
+)]
+//!
+//! ### Mathematical Operations in Substrate Development - Further Context
+//!
+//! As a recap, we covered the following concepts:
+//!
+//! 1. **Checked** operations - using [`Option`] or [`Result`]
+//! 2. **Saturating** operations - limited to the lower and upper bounds of a number type
+//! 3. **Wrapped** operations (the default) - wrap around to above or below the bounds of a type
+//!
+//! #### The problem with 'default' wrapped operations
+//!
+//! **Wrapped operations** cause the overflow to wrap around to either the maximum or minimum of
+//! that type. Imagine this in the context of a blockchain, where there are account balances, voting
+//! counters, nonces for transactions, and more.
+//!
+//! While it may seem trivial, choosing how to handle numbers is quite important. As a thought
+//! exercise, here are some scenarios that will shed more light on when to use which.
+//!
+//! #### Bob's Overflowed Balance
+//!
+//! **Bob's** balance exceeds the maximum of the `Balance` type on the `EduChain`. Because the
+//! pallet developer did not handle the calculation to add to Bob's balance with any regard to this
+//! overflow, **Bob's** balance is now essentially `0`: the operation **wrapped**.
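+//!
+//! A minimal sketch of the wrap itself (illustrative only, assuming a `u32` balance):
+//!
+//! ```ignore
+//! let bobs_balance = u32::MAX;
+//! // One more unit wraps the balance around to the bottom of the type.
+//! assert_eq!(bobs_balance.wrapping_add(1), 0);
+//! ```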
+//!
+//! **Solution: Saturating or Checked**
+//! For Bob's balance problems, using `saturating_add` or `checked_add` could've mitigated this
+//! issue. The result would've simply been capped at the upper bound of the particular type used
+//! for an on-chain balance. In other words: Bob's balance would've stayed at the maximum of the
+//! `Balance` type.
+//!
+//! #### Alice's 'Underflowed' Balance
+//!
+//! Alice’s balance has reached `0` after a transfer to Bob. Suddenly, she has been slashed on
+//! `EduChain`, causing her balance to wrap around to near the limit of `u32::MAX` - a very large
+//! amount - as wrapped operations can go both ways. Alice can now successfully vote using her new,
+//! overpowered token balance, destroying the chain's integrity.
+//!
+//! **Solution: Saturating**
+//! For Alice's balance problem, using `saturating_sub` could've mitigated this issue. A saturating
+//! calculation would've simply limited her balance to the lower bound of `u32`, as having a
+//! negative balance is not a concept within blockchains. In other words: Alice's balance would've
+//! stayed at "0", even after being slashed.
+//!
+//! This also shows that while one system may work in isolation, interfaces such as the notion of
+//! balances are often shared across multiple pallets - meaning these small choices can make a big
+//! difference depending on the scenario.
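+//!
+//! An illustrative sketch of that fix (again assuming a `u32` balance):
+//!
+//! ```ignore
+//! let alices_balance = 0u32;
+//! // Slashing 50 from a zero balance saturates at the lower bound instead of wrapping.
+//! assert_eq!(alices_balance.saturating_sub(50), 0);
+//! ```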
+//!
+//! #### Proposal ID Overwrite
+//!
+//! A `u8` value, called `proposals_count`, counts the number of proposals on-chain. Every time a
+//! new proposal is added to the system, this number increases. With the proposal pallet's high
+//! usage, it has reached `u8::MAX`’s limit of 255, causing `proposals_count` to wrap back to 0.
+//! Unfortunately, this results in new proposals overwriting old ones, effectively erasing any
+//! notion of past proposals!
+//!
+//! **Solution: Checked**
+//! For the proposal IDs, proper handling via `checked` math would've been suitable. Saturating
+//! could've been used - but it also would've 'failed' silently. Using `checked_add` to ensure that
+//! the next proposal ID is valid would've been a viable way to let the user know the state of
+//! their proposal:
+//!
+//! ```ignore
+//! let next_proposal_id = current_count.checked_add(1).ok_or_else(|| Error::TooManyProposals)?;
+//! ```
+//!
+//!
+//! From the above, we can clearly see the problematic nature of seemingly simple operations in the
+//! runtime, and care should be given to ensure a defensive approach is taken.
+//!
+//! ### Edge cases of `panic!`-able instances in Substrate
+//!
+//! As you traverse through the codebase (particularly in `substrate/frame`, where the majority of
+//! runtime code lives), you may notice that there are (only a few!) occurrences where `panic!` is
+//! used explicitly. This is used when the runtime should stall, rather than keep running, as that
+//! is considered safer. Particularly when it comes to mission-critical components, such as block
+//! authoring, consensus, or other protocol-level dependencies, going through with an action may
+//! actually cause harm to the network, and thus stalling would be the better option.
+//!
+//! Take the example of the BABE pallet ([`pallet_babe`]), which doesn't allow for a validator to
+//! participate if it is disabled (see: [`frame::traits::DisabledValidators`]):
+//!
+//! ```ignore
+//! if T::DisabledValidators::is_disabled(authority_index) {
+//!     panic!(
+//!         "Validator with index {:?} is disabled and should not be attempting to author blocks.",
+//!         authority_index,
+//!     );
+//! }
+//! ```
+//!
+//! There are other examples in various pallets, mostly those crucial to the blockchain’s
+//! functionality. Most of the time, you will not be writing pallets which operate at this level,
+//! but these exceptions should be noted regardless.
+//!
+//! ## Other Resources
+//!
+//! - [PBA Book - FRAME Tips & Tricks](https://polkadot-blockchain-academy.github.io/pba-book/substrate/tips-tricks/page.html?highlight=perthing#substrate-and-frame-tips-and-tricks)
+#![allow(dead_code)]
+#[allow(unused_variables)]
+mod fake_runtime_types {
+	// Note: The following types are purely for the purpose of example, and do not contain any
+	// *real* use case other than demonstrating various concepts.
+	pub enum RuntimeError {
+		Overflow,
+		UserDoesntExist,
+	}
+
+	pub type Address = ();
+
+	pub struct Runtime;
+
+	impl Runtime {
+		fn get_balance(account: Address) -> Result<u64, RuntimeError> {
+			Ok(0u64)
+		}
+
+		fn set_balance(account: Address, new_balance: u64) {}
+	}
+
+	#[docify::export]
+	fn increase_balance(account: Address, amount: u64) -> Result<(), RuntimeError> {
+		// Get a user's current balance
+		let balance = Runtime::get_balance(account)?;
+		// SAFELY increase the balance by some amount
+		if let Some(new_balance) = balance.checked_add(amount) {
+			Runtime::set_balance(account, new_balance);
+			Ok(())
+		} else {
+			Err(RuntimeError::Overflow)
+		}
+	}
+
+	#[docify::export]
+	fn increase_balance_match(account: Address, amount: u64) -> Result<(), RuntimeError> {
+		// Get a user's current balance
+		let balance = Runtime::get_balance(account)?;
+		// SAFELY increase the balance by some amount
+		let new_balance = match balance.checked_add(amount) {
+			Some(balance) => balance,
+			None => {
+				return Err(RuntimeError::Overflow);
+			},
+		};
+		Runtime::set_balance(account, new_balance);
+		Ok(())
+	}
+
+	#[docify::export]
+	fn increase_balance_result(account: Address, amount: u64) -> Result<(), RuntimeError> {
+		// Get a user's current balance
+		let balance = Runtime::get_balance(account)?;
+		// SAFELY increase the balance by some amount - this time, by using `ok_or`
+		let new_balance = balance.checked_add(amount).ok_or(RuntimeError::Overflow)?;
+		Runtime::set_balance(account, new_balance);
+		Ok(())
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use frame::traits::DefensiveSaturating;
+	#[docify::export]
+	#[test]
+	fn checked_add_example() {
+		// This is valid, as 20 is perfectly within the bounds of u32.
+		let add = (10u32).checked_add(10);
+		assert_eq!(add, Some(20))
+	}
+
+	#[docify::export]
+	#[test]
+	fn checked_add_handle_error_example() {
+		// This is invalid - we are adding something to u32::MAX, which would overflow.
+		// Luckily, checked_add just marks this as None!
+		let add = u32::MAX.checked_add(10);
+		assert_eq!(add, None)
+	}
+
+	#[docify::export]
+	#[test]
+	fn saturated_add_example() {
+		// Saturating add simply saturates
+		// to the numeric bound of that type if it overflows.
+		let add = u32::MAX.saturating_add(10);
+		assert_eq!(add, u32::MAX)
+	}
+
+	#[docify::export]
+	#[test]
+	#[cfg_attr(debug_assertions, should_panic(expected = "Defensive failure has been triggered!"))]
+	fn saturated_defensive_example() {
+		let saturated_defensive = u32::MAX.defensive_saturating_add(10);
+		assert_eq!(saturated_defensive, u32::MAX);
+	}
+}
diff --git a/docs/sdk/src/reference_docs/frame_currency.rs b/docs/sdk/src/reference_docs/frame_currency.rs
deleted file mode 100644
index 6987d51aec82..000000000000
--- a/docs/sdk/src/reference_docs/frame_currency.rs
+++ /dev/null
@@ -1,8 +0,0 @@
-//! FRAME Currency Abstractions and Traits
-//!
-//! Notes:
-//!
-//! - History, `Currency` trait.
-//! - `Hold` and `Freeze` with diagram.
-//! - `HoldReason` and `FreezeReason`
-//! - This footgun:
diff --git a/docs/sdk/src/reference_docs/frame_pallet_coupling.rs b/docs/sdk/src/reference_docs/frame_pallet_coupling.rs
index cca7f9feb3f4..be464bbbf835 100644
--- a/docs/sdk/src/reference_docs/frame_pallet_coupling.rs
+++ b/docs/sdk/src/reference_docs/frame_pallet_coupling.rs
@@ -143,7 +143,7 @@
//! For example, all pallets in `polkadot-sdk` that needed to work with currencies could have been
//! tightly coupled with [`pallet_balances`]. But `polkadot-sdk` also provides [`pallet_assets`]
//! (and more implementations by the community), therefore all pallets use traits to loosely couple
-//! with balances or assets pallet. More on this in [`crate::reference_docs::frame_currency`].
+//! with the balances or assets pallet. More on this in [`crate::reference_docs::frame_tokens`].
//!
//! ## Further References
//!
diff --git a/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs
index 7d870b432218..f9a69b892a31 100644
--- a/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs
+++ b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs
@@ -16,7 +16,7 @@
//! in a process called "Runtime Upgrades".
//!
//! Forkless runtime upgrades are a defining feature of the Substrate framework. Updating the
-//! runtime logic without forking the code base enables your blockchain to seemlessly evolve
+//! runtime logic without forking the code base enables your blockchain to seamlessly evolve
//! over time in a deterministic, rules-based manner. It also removes ambiguity for node operators
//! and other participants in the network about what is the canonical runtime.
//!
@@ -24,7 +24,7 @@
//!
//! ## Performing a Runtime Upgrade
//!
-//! To upgrade a runtime, an [`Origin`](frame_system::RawOrigin) with the necesarry permissions
+//! To upgrade a runtime, an [`Origin`](frame_system::RawOrigin) with the necessary permissions
//! (usually via governance) changes the `:code` storage. Usually, this is performed via a call to
//! [`set_code`] (or [`set_code_without_checks`]) with the desired new runtime blob, scheduled
//! using [`pallet_scheduler`].
@@ -41,7 +41,7 @@
//!
//! The typical use case of a migration is to 'migrate' pallet storage from one layout to another,
//! for example when the encoding of a storage item is changed. However, they can also execute
-//! arbitary logic such as:
+//! arbitrary logic such as:
//!
//! - Calling arbitrary pallet methods
//! - Mutating arbitrary on-chain state
@@ -88,7 +88,7 @@
//!
//! Prior to deploying migrations, it is critical to perform additional checks to ensure that when
//! run in our real runtime they will not brick the chain due to:
-//! - Panicing
+//! - Panicking
//! - Touching too many storage keys and resulting in an excessively large PoV
//! - Taking too long to execute
//!
@@ -131,7 +131,6 @@
//!
//! TODO: Link to multi block migration example/s once PR is merged ().
//!
-//! [`GetStorageVersion`]: frame_support::traits::GetStorageVersion
//! [`OnRuntimeUpgrade`]: frame_support::traits::OnRuntimeUpgrade
//! [`StorageVersion`]: frame_support::traits::StorageVersion
//! [`set_code`]: frame_system::Call::set_code
diff --git a/docs/sdk/src/reference_docs/frame_tokens.rs b/docs/sdk/src/reference_docs/frame_tokens.rs
new file mode 100644
index 000000000000..c9d34e2091d8
--- /dev/null
+++ b/docs/sdk/src/reference_docs/frame_tokens.rs
@@ -0,0 +1,131 @@
+// This file is part of polkadot-sdk.
+//
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # FRAME Tokens
+//!
+//! This reference doc serves as a high-level overview of the token-related logic in FRAME, and
+//! how to properly apply it to your use case.
+//!
+//! On completion of reading this doc, you should have a good understanding of:
+//! - The distinction between token traits and trait implementations in FRAME, and why this
+//! distinction is helpful
+//! - Token-related traits available in FRAME
+//! - Token-related trait implementations in FRAME
+//! - How to choose the right trait or trait implementation for your use case
+//! - Where to go next
+//!
+//! ## Getting Started
+//!
+//! The most ubiquitous way to add a token to a FRAME runtime is [`pallet_balances`]. Read
+//! more about pallets [here](crate::polkadot_sdk::frame_runtime#pallets).
+//!
+//! You may then write custom pallets that interact with [`pallet_balances`]. The fastest way to
+//! get started with that is by
+//! [tightly coupling](crate::reference_docs::frame_pallet_coupling#tight-coupling-pallets) your
+//! custom pallet to [`pallet_balances`].
+//!
+//! However, to keep pallets flexible and modular, it is often preferred to
+//! [loosely couple](crate::reference_docs::frame_pallet_coupling#loosely--coupling-pallets).
+//!
+//! To achieve loose coupling,
+//! we separate token logic into traits and trait implementations.
+//!
+//! ## Traits and Trait Implementations
+//!
+//! Broadly speaking, token logic in FRAME can be divided into two categories: traits and
+//! trait implementations.
+//!
+//! **Traits** define common interfaces that types of tokens should implement. For example, the
+//! [`fungible::Inspect`](`frame_support::traits::fungible::Inspect`) trait specifies an interface
+//! for *inspecting* token state such as the total issuance of the token, the balance of individual
+//! accounts, etc.
+//!
+//! **Trait implementations** are concrete implementations of these traits. For example, one of the
+//! many traits [`pallet_balances`] implements is
+//! [`fungible::Inspect`](`frame_support::traits::fungible::Inspect`)*. It provides the concrete way
+//! of inspecting the total issuance, balance of accounts, etc. There can be many implementations of
+//! the same traits.
+//!
+//! The distinction between traits and trait implementations is helpful because it allows pallets
+//! and other logic to be generic over their dependencies, avoiding tight coupling.
+//!
+//! To illustrate this with an example, let's consider [`pallet_preimage`]. This pallet takes a
+//! deposit in exchange for storing a preimage for later use. A naive implementation of the
+//! pallet may use [`pallet_balances`] in a tightly coupled manner, directly calling methods
+//! on the pallet to reserve and unreserve deposits. This approach works well
+//! until someone has a use case requiring that an asset from a different pallet such as
+//! [`pallet_assets`] is used for the deposit. Rather than tightly couple [`pallet_preimage`] to
+//! [`pallet_balances`], [`pallet_assets`], and every other token-handling pallet a user
+//! could possibly specify, [`pallet_preimage`] does not specify a concrete pallet as a dependency
+//! but instead accepts any dependency which implements the
+//! [`currency::ReservableCurrency`](`frame_support::traits::tokens::currency::ReservableCurrency`)
+//! trait, namely via its [`Config::Currency`](`pallet_preimage::pallet::Config::Currency`)
+//! associated type.
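+//!
+//! A rough sketch of what such a loosely coupled configuration looks like (abridged for
+//! illustration; the actual [`pallet_preimage`] `Config` contains more items):
+//!
+//! ```ignore
+//! #[pallet::config]
+//! pub trait Config: frame_system::Config {
+//!     /// Any type implementing `ReservableCurrency` can be plugged in here,
+//!     /// e.g. `pallet_balances` or any other implementation of the trait.
+//!     type Currency: ReservableCurrency<Self::AccountId>;
+//! }
+//! ```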
+//! This allows [`pallet_preimage`] to support any arbitrary pallet implementing
+//! this trait, without needing any knowledge of what those pallets may be or requiring changes to
+//! support new pallets which may be written in the future.
+//!
+//! Read more about coupling, and the benefits of loose coupling
+//! [here](crate::reference_docs::frame_pallet_coupling).
+//!
+//! ##### *Rust Advanced Tip
+//!
+//! The knowledge that [`pallet_balances`] implements
+//! [`fungible::Inspect`](`frame_support::traits::fungible::Inspect`) is not some arcane knowledge
+//! that you have to know by heart or memorize. One can simply look at the list of implementors
+//! of any trait in the Rust Doc (e.g. ),
+//! or use the `rust-analyzer` `Implementations` action.
+//!
+//! ## Fungible Token Traits in FRAME
+//!
+//! The [`fungible`](`frame_support::traits::fungible`) module contains the latest set of FRAME
+//! fungible token traits, and is recommended for all new logic requiring a fungible token.
+//! See the module documentation for more info about these fungible traits.
+//!
+//! [`fungibles`](`frame_support::traits::fungibles`) provides very similar functionality to
+//! [`fungible`](`frame_support::traits::fungible`), except it supports managing multiple tokens.
+//!
+//! You may notice the trait [`Currency`](`frame_support::traits::Currency`) with similar
+//! functionality is also used in the codebase, however, this trait is deprecated and existing logic
+//! is in the process of being migrated to [`fungible`](`frame_support::traits::fungible`) ([tracking issue](https://github.com/paritytech/polkadot-sdk/issues/226)).
+//!
+//! ## Fungible Token Trait Implementations in FRAME
+//!
+//! [`pallet_balances`] implements [`fungible`](`frame_support::traits::fungible`), and is the most
+//! commonly used fungible implementation in FRAME. Most of the time, it's used for managing the
+//! native token of the blockchain network it's used in.
+//!
+//! [`pallet_assets`] implements [`fungibles`](`frame_support::traits::fungibles`), and is another
+//! popular fungible token implementation. It supports the creation and management of multiple
+//! assets in a single crate, making it a good choice when a network requires more assets in
+//! addition to its native token.
+//!
+//! ## Non-Fungible Tokens in FRAME
+//!
+//! [`pallet_nfts`] is recommended for all NFT use cases in FRAME.
+//! See the crate documentation for more info about this pallet.
+//!
+//! [`pallet_uniques`] is deprecated and should not be used.
+//!
+//!
+//! # What Next?
+//!
+//! - If you are interested in implementing a single fungible token, continue reading the
+//! [`fungible`](`frame_support::traits::fungible`) and [`pallet_balances`] docs.
+//! - If you are interested in implementing a set of fungible tokens, continue reading the
+//! [`fungibles`](`frame_support::traits::fungibles`) trait and [`pallet_assets`] docs.
+//! - If you are interested in implementing an NFT, continue reading the [`pallet_nfts`] docs.
diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs
index de0b012bb126..145df8844f26 100644
--- a/docs/sdk/src/reference_docs/mod.rs
+++ b/docs/sdk/src/reference_docs/mod.rs
@@ -47,8 +47,7 @@ pub mod signed_extensions;
pub mod frame_origin;

/// Learn about how to write safe and defensive code in your FRAME runtime.
-// TODO: @CrackTheCode016 https://github.com/paritytech/polkadot-sdk-docs/issues/44
-pub mod safe_defensive_programming;
+pub mod defensive_programming;

/// Learn about composite enums and other runtime level types, such as "RuntimeEvent" and
/// "RuntimeCall".
@@ -66,9 +65,6 @@ pub mod metadata;
/// Learn about how frame-system handles `account-ids`, nonces, consumers and providers.
pub mod frame_system_accounts;

-/// Learn about the currency-related abstractions provided in FRAME.
-pub mod frame_currency;
-
/// Advice for configuring your development environment for Substrate development.
pub mod development_environment_advice;

@@ -76,6 +72,9 @@ pub mod development_environment_advice;
// TODO: @shawntabrizi @ggwpez https://github.com/paritytech/polkadot-sdk-docs/issues/50
pub mod frame_benchmarking_weight;

+/// Learn about the token-related logic in FRAME and how to apply it to your use case.
+pub mod frame_tokens;
+
/// Learn about chain specification file and the genesis state of the blockchain.
// TODO: @michalkucharczyk https://github.com/paritytech/polkadot-sdk-docs/issues/51
pub mod chain_spec_genesis;
diff --git a/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs b/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs
index 099512cf4ee1..379b0c11b2ad 100644
--- a/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs
+++ b/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs
@@ -117,7 +117,7 @@
//! - **Contract Code Updates**: Once deployed, although typically immutable, Smart Contracts can be
//! upgraded, but lack migration logic. The [pallet_contracts](../../../pallet_contracts/index.html)
//! allows for contracts to be upgraded by exposing the `set_code` dispatchable. More details on this
-//! can be found in [Ink! documentation on upgradeable contracts](https://use.ink/5.x/basics/upgradeable-contracts).
+//! can be found in [Ink! documentation on upgradeable contracts](https://use.ink/basics/upgradeable-contracts).
//! - **Isolated Impact**: Upgrades or changes to a smart contract generally impact only that
//! contract and its users, unlike Runtime upgrades that have a network-wide effect.
//! - **Simplicity and Rapid Development**: The development cycle for Smart Contracts is usually
diff --git a/docs/sdk/src/reference_docs/safe_defensive_programming.rs b/docs/sdk/src/reference_docs/safe_defensive_programming.rs
deleted file mode 100644
index 9d0f028e570d..000000000000
--- a/docs/sdk/src/reference_docs/safe_defensive_programming.rs
+++ /dev/null
@@ -1 +0,0 @@
-//!
diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml
index b0d71a18eaa1..659edcb041c3 100644
--- a/polkadot/Cargo.toml
+++ b/polkadot/Cargo.toml
@@ -45,8 +45,8 @@ tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_suppo
assert_cmd = "2.0.4"
nix = { version = "0.26.1", features = ["signal"] }
tempfile = "3.2.0"
-tokio = "1.24.2"
-substrate-rpc-client = { path = "../substrate/utils/frame/rpc/client/" }
+tokio = "1.37"
+substrate-rpc-client = { path = "../substrate/utils/frame/rpc/client" }
polkadot-core-primitives = { path = "core-primitives" }

[build-dependencies]
diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml
index f57efa7ba436..b0c22c5a97fe 100644
--- a/polkadot/cli/Cargo.toml
+++ b/polkadot/cli/Cargo.toml
@@ -22,7 +22,7 @@ cfg-if = "1.0"
clap = { version = "4.5.3", features = ["derive"], optional = true }
log = { workspace = true, default-features = true }
thiserror = { workspace = true }
-futures = "0.3.21"
+futures = "0.3.30"
pyro = { package = "pyroscope", version = "0.5.3", optional = true }
pyroscope_pprofrs = { version = "0.2", optional = true }

diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs
index e1bc2309a942..74e190444693 100644
--- a/polkadot/cli/src/cli.rs
+++ b/polkadot/cli/src/cli.rs
@@ -122,7 +122,7 @@ pub struct RunCmd {
	/// Overseer message capacity override.
	///
-	/// **Dangerous!** Do not touch unless explicitly adviced to.
+	/// **Dangerous!** Do not touch unless explicitly advised to.
	#[arg(long)]
	pub overseer_channel_capacity_override: Option<usize>,

diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml
index 37e124608f9a..1b3f9054c1f4 100644
--- a/polkadot/core-primitives/Cargo.toml
+++ b/polkadot/core-primitives/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
[dependencies]
sp-core = { path = "../../substrate/primitives/core", default-features = false }
sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }

[features]
diff --git a/polkadot/grafana/README.md b/polkadot/grafana/README.md
index 7350001bfa1f..e909fdd29a75 100644
--- a/polkadot/grafana/README.md
+++ b/polkadot/grafana/README.md
@@ -8,7 +8,7 @@ monitor the liveliness and performance of a network and its validators.
# How does it work ?
Just import the dashboard JSON files from this folder in your Grafana installation. All dashboards are grouped in
-folder percategory (like for example `parachains`). The files have been created by Grafana export functionality and
+folder per category (like for example `parachains`). The files have been created by Grafana export functionality and
follow the data model specified [here](https://grafana.com/docs/grafana/latest/dashboards/json-model/).
We aim to keep the dashboards here in sync with the implementation, except dashboards for development and
diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml
index 8df0c2b1edae..ebc53a9e01bb 100644
--- a/polkadot/node/collation-generation/Cargo.toml
+++ b/polkadot/node/collation-generation/Cargo.toml
@@ -10,7 +10,7 @@ description = "Collator-side subsystem that handles incoming candidate submissio
workspace = true

[dependencies]
-futures = "0.3.21"
+futures = "0.3.30"
gum = { package = "tracing-gum", path = "../gum" }
polkadot-erasure-coding = { path = "../../erasure-coding" }
polkadot-node-primitives = { path = "../primitives" }
@@ -26,4 +26,5 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [
polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" }
test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" }
assert_matches = "1.4.0"
+rstest = "0.18.2"
sp-keyring = { path = "../../../substrate/primitives/keyring" }
diff --git a/polkadot/node/collation-generation/src/error.rs b/polkadot/node/collation-generation/src/error.rs
index ac5db6cd7f28..f04e3c4f20b4 100644
--- a/polkadot/node/collation-generation/src/error.rs
+++ b/polkadot/node/collation-generation/src/error.rs
@@ -27,7 +27,11 @@ pub enum Error {
	#[error(transparent)]
	Util(#[from] polkadot_node_subsystem_util::Error),
	#[error(transparent)]
+	UtilRuntime(#[from] polkadot_node_subsystem_util::runtime::Error),
+	#[error(transparent)]
	Erasure(#[from] polkadot_erasure_coding::Error),
+	#[error("Parachain backing state not available in runtime.")]
+	MissingParaBackingState,
}

pub type Result<T> = std::result::Result<T, Error>;
diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs
index a89351628a08..60ea1cf5ff48 100644
--- a/polkadot/node/collation-generation/src/lib.rs
+++ b/polkadot/node/collation-generation/src/lib.rs
@@ -43,13 +43,14 @@ use polkadot_node_subsystem::{
	SubsystemContext, SubsystemError, SubsystemResult,
};
use polkadot_node_subsystem_util::{
-	request_async_backing_params, request_availability_cores, request_persisted_validation_data,
-	request_validation_code, request_validation_code_hash, request_validators,
+	request_async_backing_params, request_availability_cores, request_para_backing_state,
+	request_persisted_validation_data, request_validation_code, request_validation_code_hash,
+	request_validators, vstaging::fetch_claim_queue,
};
use polkadot_primitives::{
	collator_signature_payload, CandidateCommitments, CandidateDescriptor, CandidateReceipt,
-	CollatorPair, CoreState, Hash, Id as ParaId, OccupiedCoreAssumption, PersistedValidationData,
-	ValidationCodeHash,
+	CollatorPair, CoreIndex, CoreState, Hash, Id as ParaId, OccupiedCoreAssumption,
+	PersistedValidationData, ScheduledCore, ValidationCodeHash,
};
use sp_core::crypto::Pair;
use std::sync::Arc;
@@ -208,6 +209,7 @@ async fn handle_new_activations(
	if config.collator.is_none() {
		return Ok(())
	}
+	let para_id = config.para_id;

	let _overall_timer = metrics.time_new_activations();

@@ -221,28 +223,42 @@
	);

	let availability_cores = availability_cores??;
-	let n_validators = validators??.len();
	let async_backing_params = async_backing_params?.ok();
+	let n_validators = validators??.len();
+	let maybe_claim_queue = fetch_claim_queue(ctx.sender(), relay_parent)
+		.await
+		.map_err(crate::error::Error::UtilRuntime)?;

-	for (core_idx, core) in availability_cores.into_iter().enumerate() {
-		let _availability_core_timer = metrics.time_new_activations_availability_core();
+	// The loop below will fill in cores that the para is allowed to build on.
+	let mut cores_to_build_on = Vec::new();

-		let (scheduled_core, assumption) = match core {
-			CoreState::Scheduled(scheduled_core) =>
-				(scheduled_core, OccupiedCoreAssumption::Free),
+	for (core_idx, core) in availability_cores.into_iter().enumerate() {
+		let scheduled_core = match core {
+			CoreState::Scheduled(scheduled_core) => scheduled_core,
			CoreState::Occupied(occupied_core) => match async_backing_params {
				Some(params) if params.max_candidate_depth >= 1 => {
					// maximum candidate depth when building on top of a block
					// pending availability is necessarily 1 - the depth of the
					// pending block is 0 so the child has depth 1.
-					// TODO [now]: this assumes that next up == current.
-					// in practice we should only set `OccupiedCoreAssumption::Included`
-					// when the candidate occupying the core is also of the same para.
-					if let Some(scheduled) = occupied_core.next_up_on_available {
-						(scheduled, OccupiedCoreAssumption::Included)
-					} else {
-						continue
+					// Use the claim queue if available, or fall back to `next_up_on_available`.
+					let res = match maybe_claim_queue {
+						Some(ref claim_queue) => {
+							// read what's in the claim queue for this core at depth 0.
+							claim_queue
+								.get_claim_for(CoreIndex(core_idx as u32), 0)
+								.map(|para_id| ScheduledCore { para_id, collator: None })
+						},
+						None => {
+							// Runtime doesn't support the claim queue runtime API. Fall back to
+							// `next_up_on_available`.
+							occupied_core.next_up_on_available
+						},
+					};
+
+					match res {
+						Some(res) => res,
+						None => continue,
					}
				},
				_ => {
@@ -259,7 +275,7 @@
					gum::trace!(
						target: LOG_TARGET,
						core_idx = %core_idx,
-						"core is free. Keep going.",
+						"core is not assigned to any para. Keep going.",
					);
					continue
				},
@@ -274,64 +290,90 @@
				their_para = %scheduled_core.para_id,
				"core is not assigned to our para. Keep going.",
			);
-			continue
+		} else {
+			// Accumulate cores for building collation(s) outside the loop.
+			cores_to_build_on.push(CoreIndex(core_idx as u32));
		}
+	}

-		// we get validation data and validation code synchronously for each core instead of
-		// within the subtask loop, because we have only a single mutable handle to the
-		// context, so the work can't really be distributed
+	// Skip to the next relay parent if there is no core assigned to us.
+	if cores_to_build_on.is_empty() {
+		continue
+	}

-		let validation_data = match request_persisted_validation_data(
-			relay_parent,
-			scheduled_core.para_id,
-			assumption,
-			ctx.sender(),
-		)
-		.await
-		.await??
-		{
-			Some(v) => v,
-			None => {
-				gum::trace!(
-					target: LOG_TARGET,
-					core_idx = %core_idx,
-					relay_parent = ?relay_parent,
-					our_para = %config.para_id,
-					their_para = %scheduled_core.para_id,
-					"validation data is not available",
-				);
-				continue
-			},
-		};
+	let para_backing_state =
+		request_para_backing_state(relay_parent, config.para_id, ctx.sender())
+			.await
+			.await??
+			.ok_or(crate::error::Error::MissingParaBackingState)?;
+
+	// We are being very optimistic here, but one of the cores could remain pending availability
+	// for some more blocks, or even time out.
+	// As for the timeout assumption, the collator can't really know, because it doesn't receive
+	// bitfield gossip.
+	let assumption = if para_backing_state.pending_availability.is_empty() {
+		OccupiedCoreAssumption::Free
+	} else {
+		OccupiedCoreAssumption::Included
+	};
+
+	gum::debug!(
+		target: LOG_TARGET,
+		relay_parent = ?relay_parent,
+		our_para = %config.para_id,
+		?assumption,
+		"Occupied core(s) assumption",
+	);

-		let validation_code_hash = match obtain_validation_code_hash_with_assumption(
-			relay_parent,
-			scheduled_core.para_id,
-			assumption,
-			ctx.sender(),
-		)
-		.await?
-		{
-			Some(v) => v,
-			None => {
-				gum::trace!(
-					target: LOG_TARGET,
-					core_idx = %core_idx,
-					relay_parent = ?relay_parent,
-					our_para = %config.para_id,
-					their_para = %scheduled_core.para_id,
-					"validation code hash is not found.",
-				);
-				continue
-			},
-		};
+	let mut validation_data = match request_persisted_validation_data(
+		relay_parent,
+		config.para_id,
+		assumption,
+		ctx.sender(),
+	)
+	.await
+	.await??
+	{
+		Some(v) => v,
+		None => {
+			gum::debug!(
+				target: LOG_TARGET,
+				relay_parent = ?relay_parent,
+				our_para = %config.para_id,
+				"validation data is not available",
+			);
+			continue
+		},
+	};

-		let task_config = config.clone();
-		let metrics = metrics.clone();
-		let mut task_sender = ctx.sender().clone();
-		ctx.spawn(
-			"collation-builder",
-			Box::pin(async move {
+	let validation_code_hash = match obtain_validation_code_hash_with_assumption(
+		relay_parent,
+		config.para_id,
+		assumption,
+		ctx.sender(),
+	)
+	.await?
+	{
+		Some(v) => v,
+		None => {
+			gum::debug!(
+				target: LOG_TARGET,
+				relay_parent = ?relay_parent,
+				our_para = %config.para_id,
+				"validation code hash is not found.",
+			);
+			continue
+		},
+	};
+
+	let task_config = config.clone();
+	let metrics = metrics.clone();
+	let mut task_sender = ctx.sender().clone();
+
+	ctx.spawn(
+		"chained-collation-builder",
+		Box::pin(async move {
+			for core_index in cores_to_build_on {
				let collator_fn = match task_config.collator.as_ref() {
					Some(x) => x,
					None => return,
@@ -343,21 +385,23 @@
					None => {
						gum::debug!(
							target: LOG_TARGET,
-							para_id = %scheduled_core.para_id,
+							?para_id,
							"collator returned no collation on collate",
						);
						return
					},
				};

+				let parent_head = collation.head_data.clone();
				construct_and_distribute_receipt(
					PreparedCollation {
						collation,
-						para_id: scheduled_core.para_id,
+						para_id,
						relay_parent,
-						validation_data,
+						validation_data: validation_data.clone(),
						validation_code_hash,
						n_validators,
+						core_index,
					},
					task_config.key.clone(),
					&mut task_sender,
@@ -365,9 +409,13 @@
					&metrics,
				)
				.await;
-			}),
-		)?;
-	}
+
+				// Chain the collations. All else stays the same as we build the chained
+				// collation on the same relay parent.
+				validation_data.parent_head = parent_head;
+			}
+		}),
+	)?;
}

Ok(())
@@ -388,6 +436,7 @@ async fn handle_submit_collation(
		parent_head,
		validation_code_hash,
		result_sender,
+		core_index,
	} = params;

	let validators = request_validators(relay_parent, ctx.sender()).await.await??;
@@ -424,6 +473,7 @@
		validation_data,
		validation_code_hash,
		n_validators,
+		core_index,
	};

	construct_and_distribute_receipt(
@@ -445,6 +495,7 @@ struct PreparedCollation {
	validation_data: PersistedValidationData,
	validation_code_hash: ValidationCodeHash,
	n_validators: usize,
+	core_index: CoreIndex,
}

/// Takes a prepared collation, along with its context, and produces a candidate receipt
@@ -463,6 +514,7 @@ async fn construct_and_distribute_receipt(
		validation_data,
		validation_code_hash,
		n_validators,
+		core_index,
	} = collation;

	let persisted_validation_data_hash = validation_data.hash();
@@ -558,6 +610,7 @@
			pov,
			parent_head_data,
			result_sender,
+			core_index,
		})
		.await;
}
diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs
index eb0ede6ef6b1..1ec2cccfae71 100644
--- a/polkadot/node/collation-generation/src/tests.rs
+++ b/polkadot/node/collation-generation/src/tests.rs
@@ -25,15 +25,24 @@ use polkadot_node_primitives::{BlockData, Collation, CollationResult, MaybeCompr
use polkadot_node_subsystem::{
	errors::RuntimeApiError,
	messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest},
+	ActivatedLeaf,
};
use polkadot_node_subsystem_test_helpers::{subsystem_test_harness, TestSubsystemContextHandle};
use polkadot_node_subsystem_util::TimeoutExt;
use polkadot_primitives::{
-	CollatorPair, HeadData, Id as ParaId, PersistedValidationData, ScheduledCore, ValidationCode,
+	async_backing::{BackingState, CandidatePendingAvailability},
+	AsyncBackingParams, BlockNumber, CollatorPair, HeadData, PersistedValidationData,
+	ScheduledCore, ValidationCode,
};
+use rstest::rstest;
use sp_keyring::sr25519::Keyring as Sr25519Keyring;
-use std::pin::Pin;
-use test_helpers::{dummy_hash, dummy_head_data, dummy_validator};
+use std::{
+	collections::{BTreeMap, VecDeque},
+	pin::Pin,
+};
+use test_helpers::{
+	dummy_candidate_descriptor, dummy_hash, dummy_head_data, dummy_validator, make_candidate,
+};

type VirtualOverseer = TestSubsystemContextHandle<CollationGenerationMessage>;

@@ -102,9 +111,9 @@ impl Future for TestCollator {

impl Unpin for TestCollator {}

-async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
-	const TIMEOUT: std::time::Duration = std::time::Duration::from_millis(2000);
+const TIMEOUT: std::time::Duration = std::time::Duration::from_millis(2000);

+async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
	overseer
		.recv()
		.timeout(TIMEOUT)
@@ -132,8 +141,45 @@ fn scheduled_core_for<Id: Into<ParaId>>(para_id: Id) -> ScheduledCore {
	ScheduledCore { para_id: para_id.into(), collator: None }
}

-#[test]
-fn requests_availability_per_relay_parent() {
+fn dummy_candidate_pending_availability(
+	para_id: ParaId,
+	candidate_relay_parent: Hash,
+	relay_parent_number: BlockNumber,
+) -> CandidatePendingAvailability {
+	let (candidate, _pvd) = make_candidate(
+		candidate_relay_parent,
+		relay_parent_number,
+		para_id,
+		dummy_head_data(),
+		HeadData(vec![1]),
+		ValidationCode(vec![1, 2, 3]).hash(),
+	);
+	let candidate_hash = candidate.hash();
+
+	CandidatePendingAvailability {
+		candidate_hash,
+		descriptor: candidate.descriptor,
+		commitments: candidate.commitments,
+		relay_parent_number,
+		max_pov_size: 5 * 1024 * 1024,
+	}
+}
+
+fn dummy_backing_state(pending_availability: Vec<CandidatePendingAvailability>) -> BackingState {
+	let constraints = helpers::dummy_constraints(
+		0,
+		vec![0],
+		dummy_head_data(),
+		ValidationCodeHash::from(Hash::repeat_byte(42)),
+	);
+
+	BackingState { constraints, pending_availability }
+}
+
+#[rstest]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn requests_availability_per_relay_parent(#[case] runtime_version: u32) {
	let activated_hashes: Vec<Hash> =
		vec![[1; 32].into(), [4; 32].into(), [9; 32].into(), [16; 32].into()];
@@ -159,6 +205,24 @@
			))) => {
				tx.send(Err(RuntimeApiError::NotSupported { runtime_api_name: "doesnt_matter" })).unwrap();
			},
+			Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				_hash,
+				RuntimeApiRequest::Version(tx),
+			))) => {
+				tx.send(Ok(runtime_version)).unwrap();
+			},
+			Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				_hash,
+				RuntimeApiRequest::ClaimQueue(tx),
+			))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => {
+				tx.send(Ok(BTreeMap::new())).unwrap();
+			},
+			Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				_hash,
+				RuntimeApiRequest::ParaBackingState(_para_id, tx),
+			))) => {
+				tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap();
+			},
			Some(msg) => panic!("didn't expect any other overseer requests given no availability cores; got {:?}", msg),
		}
	}
@@ -184,8 +248,10 @@
	assert_eq!(requested_availability_cores, activated_hashes);
}

-#[test]
-fn requests_validation_data_for_scheduled_matches() {
+#[rstest]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn requests_validation_data_for_scheduled_matches(#[case] runtime_version: u32) {
	let activated_hashes: Vec<Hash> = vec![
		Hash::repeat_byte(1),
		Hash::repeat_byte(4),
@@ -242,6 +308,24 @@
				}))
				.unwrap();
			},
+			Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				_hash,
+				RuntimeApiRequest::Version(tx),
+			))) => {
+				tx.send(Ok(runtime_version)).unwrap();
+			},
+			Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				_hash,
+				RuntimeApiRequest::ClaimQueue(tx),
+			))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => {
+				tx.send(Ok(BTreeMap::new())).unwrap();
+			},
+			Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				_hash,
+				RuntimeApiRequest::ParaBackingState(_para_id, tx),
+			))) => {
+				tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap();
+			},
			Some(msg) => {
				panic!("didn't expect any other overseer requests; got {:?}", msg)
			},
@@ -271,8 +355,10 @@
	assert_eq!(requested_validation_data, vec![[4; 32].into()]);
}

-#[test]
-fn sends_distribute_collation_message() {
+#[rstest]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn sends_distribute_collation_message(#[case] runtime_version: u32) {
	let activated_hashes: Vec<Hash> = vec![
		Hash::repeat_byte(1),
		Hash::repeat_byte(4),
@@ -339,6 +425,24 @@
				}))
				.unwrap();
			},
+			Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				_hash,
+				RuntimeApiRequest::Version(tx),
+			))) => {
+				tx.send(Ok(runtime_version)).unwrap();
+			},
+			Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				_hash,
+				RuntimeApiRequest::ClaimQueue(tx),
+			))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => {
+				tx.send(Ok(BTreeMap::new())).unwrap();
+			},
+			Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				_hash,
+				RuntimeApiRequest::ParaBackingState(_para_id, tx),
+			))) => {
+				tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap();
+			},
			Some(msg @ AllMessages::CollatorProtocol(_)) => {
				inner_to_collator_protocol.lock().await.push(msg);
			},
@@ -423,8 +527,10 @@
	}
}

-#[test]
-fn fallback_when_no_validation_code_hash_api() {
+#[rstest]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn fallback_when_no_validation_code_hash_api(#[case] runtime_version: u32) {
	// This is a variant of the above test, but with the validation code hash API disabled.

	let activated_hashes: Vec<Hash> = vec![
@@ -501,9 +607,27 @@
				}))
				.unwrap();
			},
+			Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				_hash,
+				RuntimeApiRequest::Version(tx),
+			))) => {
+				tx.send(Ok(runtime_version)).unwrap();
+			},
			Some(msg @ AllMessages::CollatorProtocol(_)) => {
				inner_to_collator_protocol.lock().await.push(msg);
			},
+			Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				_hash,
+				RuntimeApiRequest::ClaimQueue(tx),
+			))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => {
+				tx.send(Ok(Default::default())).unwrap();
+			},
+			Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				_hash,
+				RuntimeApiRequest::ParaBackingState(_para_id, tx),
+			))) => {
+				tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap();
+			},
			Some(msg) => {
				panic!("didn't expect any other overseer requests; got {:?}", msg)
			},
@@ -551,6 +675,7 @@
			parent_head: vec![1, 2, 3].into(),
			validation_code_hash: Hash::repeat_byte(1).into(),
			result_sender: None,
+			core_index: CoreIndex(0),
		}),
	})
	.await;
@@ -563,7 +688,7 @@
fn submit_collation_leads_to_distribution() {
	let relay_parent = Hash::repeat_byte(0);
	let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42));
-	let parent_head = HeadData::from(vec![1, 2, 3]);
+	let parent_head = dummy_head_data();
	let para_id = ParaId::from(5);
	let expected_pvd = PersistedValidationData {
		parent_head: parent_head.clone(),
@@ -584,9 +709,10 @@
		msg: CollationGenerationMessage::SubmitCollation(SubmitCollationParams {
			relay_parent,
			collation: test_collation(),
-			parent_head: vec![1, 2, 3].into(),
+			parent_head: dummy_head_data(),
			validation_code_hash,
			result_sender: None,
+			core_index: CoreIndex(0),
		}),
	})
	.await;
@@ -635,3 +761,435 @@
	virtual_overseer
	});
}
+
+// There is one core in `Occupied` state and async backing is enabled. On new head activation
+// `CollationGeneration` should produce and distribute a new collation.
+#[rstest]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn distribute_collation_for_occupied_core_with_async_backing_enabled(#[case] runtime_version: u32) {
+	let activated_hash: Hash = [1; 32].into();
+	let para_id = ParaId::from(5);
+
+	// One core, in occupied state. The data in `CoreState` and `ClaimQueue` should match.
+	let cores: Vec<CoreState> = vec![CoreState::Occupied(polkadot_primitives::OccupiedCore {
+		next_up_on_available: Some(ScheduledCore { para_id, collator: None }),
+		occupied_since: 1,
+		time_out_at: 10,
+		next_up_on_time_out: Some(ScheduledCore { para_id, collator: None }),
+		availability: Default::default(), // doesn't matter
+		group_responsible: polkadot_primitives::GroupIndex(0),
+		candidate_hash: Default::default(),
+		candidate_descriptor: dummy_candidate_descriptor(dummy_hash()),
+	})];
+	let claim_queue = BTreeMap::from([(CoreIndex::from(0), VecDeque::from([para_id]))]).into();
+
+	test_harness(|mut virtual_overseer| async move {
+		helpers::initialize_collator(&mut virtual_overseer, para_id).await;
+		helpers::activate_new_head(&mut virtual_overseer, activated_hash).await;
+
+		let pending_availability =
+			vec![dummy_candidate_pending_availability(para_id, activated_hash, 1)];
+		helpers::handle_runtime_calls_on_new_head_activation(
+			&mut virtual_overseer,
+			activated_hash,
+			AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 },
+			cores,
+			runtime_version,
+			claim_queue,
+		)
+		.await;
+		helpers::handle_cores_processing_for_a_leaf(
+			&mut virtual_overseer,
+			activated_hash,
+			para_id,
+			// `CoreState` is `Occupied` => `OccupiedCoreAssumption` is `Included`
+			OccupiedCoreAssumption::Included,
+			1,
+			pending_availability,
+		)
+		.await;
+
+		virtual_overseer
+	});
+}
+
+// There is a variable number of cores in `Occupied` state and async backing is enabled.
+// On new head activation `CollationGeneration` should produce and distribute a new collation
+// with a proper assumption about the para candidate chain availability at the next block.
+#[rstest]
+#[case(0)]
+#[case(1)]
+#[case(2)]
+fn distribute_collation_for_occupied_cores_with_async_backing_enabled_and_elastic_scaling(
+	#[case] candidates_pending_avail: u32,
+) {
+	let activated_hash: Hash = [1; 32].into();
+	let para_id = ParaId::from(5);
+
+	let cores = (0..3)
+		.into_iter()
+		.map(|idx| {
+			CoreState::Occupied(polkadot_primitives::OccupiedCore {
+				next_up_on_available: Some(ScheduledCore { para_id, collator: None }),
+				occupied_since: 0,
+				time_out_at: 10,
+				next_up_on_time_out: Some(ScheduledCore { para_id, collator: None }),
+				availability: Default::default(), // doesn't matter
+				group_responsible: polkadot_primitives::GroupIndex(idx as u32),
+				candidate_hash: Default::default(),
+				candidate_descriptor: dummy_candidate_descriptor(dummy_hash()),
+			})
+		})
+		.collect::<Vec<_>>();
+
+	let pending_availability = (0..candidates_pending_avail)
+		.into_iter()
+		.map(|_idx| dummy_candidate_pending_availability(para_id, activated_hash, 0))
+		.collect::<Vec<_>>();
+
+	let claim_queue = cores
+		.iter()
+		.enumerate()
+		.map(|(idx, _core)| (CoreIndex::from(idx as u32), VecDeque::from([para_id])))
+		.collect::<BTreeMap<_, _>>();
+	let total_cores = cores.len();
+
+	test_harness(|mut virtual_overseer| async move {
+		helpers::initialize_collator(&mut virtual_overseer, para_id).await;
+		helpers::activate_new_head(&mut virtual_overseer, activated_hash).await;
+		helpers::handle_runtime_calls_on_new_head_activation(
+			&mut virtual_overseer,
+			activated_hash,
+			AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 },
+			cores,
+			// Using the latest runtime with the fancy claim queue exposed.
+			RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT,
+			claim_queue,
+		)
+		.await;
+
+		helpers::handle_cores_processing_for_a_leaf(
+			&mut virtual_overseer,
+			activated_hash,
+			para_id,
+			// If at least 1 core is occupied => `OccupiedCoreAssumption` is `Included`,
+			// else the assumption is `Free`.
+			if candidates_pending_avail > 0 {
+				OccupiedCoreAssumption::Included
+			} else {
+				OccupiedCoreAssumption::Free
+			},
+			total_cores,
+			pending_availability,
+		)
+		.await;
+
+		virtual_overseer
+	});
+}
+
+// There is a variable number of cores in `Free` state and async backing is enabled.
+// On new head activation `CollationGeneration` should produce and distribute a new collation
+// with the proper assumption about the para's candidate chain availability at the next block.
+#[rstest]
+#[case(0)]
+#[case(1)]
+#[case(2)]
+fn distribute_collation_for_free_cores_with_async_backing_enabled_and_elastic_scaling(
+	#[case] total_cores: usize,
+) {
+	let activated_hash: Hash = [1; 32].into();
+	let para_id = ParaId::from(5);
+
+	let cores = (0..total_cores)
+		.into_iter()
+		.map(|_idx| CoreState::Scheduled(ScheduledCore { para_id, collator: None }))
+		.collect::<Vec<_>>();
+
+	let claim_queue = cores
+		.iter()
+		.enumerate()
+		.map(|(idx, _core)| (CoreIndex::from(idx as u32), VecDeque::from([para_id])))
+		.collect::<BTreeMap<_, _>>();
+
+	test_harness(|mut virtual_overseer| async move {
+		helpers::initialize_collator(&mut virtual_overseer, para_id).await;
+		helpers::activate_new_head(&mut virtual_overseer, activated_hash).await;
+		helpers::handle_runtime_calls_on_new_head_activation(
+			&mut virtual_overseer,
+			activated_hash,
+			AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 },
+			cores,
+			// Using the latest runtime with the fancy claim queue exposed.
+			RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT,
+			claim_queue,
+		)
+		.await;
+
+		helpers::handle_cores_processing_for_a_leaf(
+			&mut virtual_overseer,
+			activated_hash,
+			para_id,
+			// `CoreState` is `Free` => `OccupiedCoreAssumption` is `Free`
+			OccupiedCoreAssumption::Free,
+			total_cores,
+			vec![],
+		)
+		.await;
+
+		virtual_overseer
+	});
+}
+
+// There is one core in `Occupied` state and async backing is disabled. On new head activation
+// no new collation should be generated.
+#[rstest]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn no_collation_is_distributed_for_occupied_core_with_async_backing_disabled(
+	#[case] runtime_version: u32,
+) {
+	let activated_hash: Hash = [1; 32].into();
+	let para_id = ParaId::from(5);
+
+	// One core, in occupied state. The data in `CoreState` and `ClaimQueue` should match.
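+	// (This test models "async backing disabled" by answering the `AsyncBackingParams`
+	// request with zeroed values; see the params passed to the helper below.)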
+	let cores: Vec<CoreState> = vec![CoreState::Occupied(polkadot_primitives::OccupiedCore {
+		next_up_on_available: Some(ScheduledCore { para_id, collator: None }),
+		occupied_since: 1,
+		time_out_at: 10,
+		next_up_on_time_out: Some(ScheduledCore { para_id, collator: None }),
+		availability: Default::default(), // doesn't matter
+		group_responsible: polkadot_primitives::GroupIndex(0),
+		candidate_hash: Default::default(),
+		candidate_descriptor: dummy_candidate_descriptor(dummy_hash()),
+	})];
+	let claim_queue = BTreeMap::from([(CoreIndex::from(0), VecDeque::from([para_id]))]).into();
+
+	test_harness(|mut virtual_overseer| async move {
+		helpers::initialize_collator(&mut virtual_overseer, para_id).await;
+		helpers::activate_new_head(&mut virtual_overseer, activated_hash).await;
+
+		helpers::handle_runtime_calls_on_new_head_activation(
+			&mut virtual_overseer,
+			activated_hash,
+			AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: 0 },
+			cores,
+			runtime_version,
+			claim_queue,
+		)
+		.await;
+
+		virtual_overseer
+	});
+}
+
+mod helpers {
+	use polkadot_primitives::{
+		async_backing::{Constraints, InboundHrmpLimitations},
+		BlockNumber,
+	};
+
+	use super::*;
+
+	// A set of dummy constraints for `ParaBackingState`.
+	pub(crate) fn dummy_constraints(
+		min_relay_parent_number: BlockNumber,
+		valid_watermarks: Vec<BlockNumber>,
+		required_parent: HeadData,
+		validation_code_hash: ValidationCodeHash,
+	) -> Constraints {
+		Constraints {
+			min_relay_parent_number,
+			max_pov_size: 5 * 1024 * 1024,
+			max_code_size: 1_000_000,
+			ump_remaining: 10,
+			ump_remaining_bytes: 1_000,
+			max_ump_num_per_candidate: 10,
+			dmp_remaining_messages: vec![],
+			hrmp_inbound: InboundHrmpLimitations { valid_watermarks },
+			hrmp_channels_out: vec![],
+			max_hrmp_num_per_candidate: 0,
+			required_parent,
+			validation_code_hash,
+			upgrade_restriction: None,
+			future_validation_code: None,
+		}
+	}
+
+	// Sends `Initialize` with a collator config.
+	pub async fn initialize_collator(virtual_overseer: &mut VirtualOverseer, para_id: ParaId) {
+		virtual_overseer
+			.send(FromOrchestra::Communication {
+				msg: CollationGenerationMessage::Initialize(test_config(para_id)),
+			})
+			.await;
+	}
+
+	// Sends `ActiveLeaves` for a single leaf with the specified hash. Block number is hardcoded.
+	pub async fn activate_new_head(virtual_overseer: &mut VirtualOverseer, activated_hash: Hash) {
+		virtual_overseer
+			.send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate {
+				activated: Some(ActivatedLeaf {
+					hash: activated_hash,
+					number: 10,
+					unpin_handle: polkadot_node_subsystem_test_helpers::mock::dummy_unpin_handle(
+						activated_hash,
+					),
+					span: Arc::new(overseer::jaeger::Span::Disabled),
+				}),
+				..Default::default()
+			})))
+			.await;
+	}
+
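+	// Note: the helper below asserts a fixed sequence of runtime API requests
+	// (`AvailabilityCores`, `Validators`, `AsyncBackingParams`, `Version` and, on new
+	// enough runtimes, `ClaimQueue`), matching the order in which
+	// `handle_new_activations` issues them.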
+	// Handle all runtime calls performed in `handle_new_activations`. Conditionally expects a
+	// `CLAIM_QUEUE_RUNTIME_REQUIREMENT` call if the passed `runtime_version` is greater than or
+	// equal to `CLAIM_QUEUE_RUNTIME_REQUIREMENT`.
+	pub async fn handle_runtime_calls_on_new_head_activation(
+		virtual_overseer: &mut VirtualOverseer,
+		activated_hash: Hash,
+		async_backing_params: AsyncBackingParams,
+		cores: Vec<CoreState>,
+		runtime_version: u32,
+		claim_queue: BTreeMap<CoreIndex, VecDeque<ParaId>>,
+	) {
+		assert_matches!(
+			overseer_recv(virtual_overseer).await,
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::AvailabilityCores(tx))) => {
+				assert_eq!(hash, activated_hash);
+				let _ = tx.send(Ok(cores));
+			}
+		);
+
+		assert_matches!(
+			overseer_recv(virtual_overseer).await,
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::Validators(tx))) => {
+				assert_eq!(hash, activated_hash);
+				let _ = tx.send(Ok(vec![
+					Sr25519Keyring::Alice.public().into(),
+					Sr25519Keyring::Bob.public().into(),
+					Sr25519Keyring::Charlie.public().into(),
+				]));
+			}
+		);
+
+		assert_matches!(
+			overseer_recv(virtual_overseer).await,
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				hash,
+				RuntimeApiRequest::AsyncBackingParams(
+					tx,
+				),
+			)) => {
+				assert_eq!(hash, activated_hash);
+				let _ = tx.send(Ok(async_backing_params));
+			}
+		);
+
+		assert_matches!(
+			overseer_recv(virtual_overseer).await,
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				hash,
+				RuntimeApiRequest::Version(tx),
+			)) => {
+				assert_eq!(hash, activated_hash);
+				let _ = tx.send(Ok(runtime_version));
+			}
+		);
+
+		if runtime_version == RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT {
+			assert_matches!(
+				overseer_recv(virtual_overseer).await,
+				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+					hash,
+					RuntimeApiRequest::ClaimQueue(tx),
+				)) => {
+					assert_eq!(hash, activated_hash);
+					let _ = tx.send(Ok(claim_queue.into()));
+				}
+			);
+		}
+	}
+
+	// Handles all runtime requests performed in `handle_new_activations` for the case when a
+	// collation should be prepared for the new leaf.
+	pub async fn handle_cores_processing_for_a_leaf(
+		virtual_overseer: &mut VirtualOverseer,
+		activated_hash: Hash,
+		para_id: ParaId,
+		expected_occupied_core_assumption: OccupiedCoreAssumption,
+		cores_assigned: usize,
+		pending_availability: Vec<CandidatePendingAvailability>,
+	) {
+		// Expect no messages if no cores are assigned to the para.
+		if cores_assigned == 0 {
+			assert!(overseer_recv(virtual_overseer).timeout(TIMEOUT / 2).await.is_none());
+			return
+		}
+
+		// Some hardcoded data - if needed, extract to parameters.
+		let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42));
+		let parent_head = dummy_head_data();
+		let pvd = PersistedValidationData {
+			parent_head: parent_head.clone(),
+			relay_parent_number: 10,
+			relay_parent_storage_root: Hash::repeat_byte(1),
+			max_pov_size: 1024,
+		};
+
+		assert_matches!(
+			overseer_recv(virtual_overseer).await,
+			AllMessages::RuntimeApi(
+				RuntimeApiMessage::Request(parent, RuntimeApiRequest::ParaBackingState(p_id, tx))
+			) if parent == activated_hash && p_id == para_id => {
+				tx.send(Ok(Some(dummy_backing_state(pending_availability)))).unwrap();
+			}
+		);
+
+		assert_matches!(
+			overseer_recv(virtual_overseer).await,
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::PersistedValidationData(id, a, tx))) => {
+				assert_eq!(hash, activated_hash);
+				assert_eq!(id, para_id);
+				assert_eq!(a, expected_occupied_core_assumption);
+
+				let _ = tx.send(Ok(Some(pvd.clone())));
+			}
+		);
+
+		assert_matches!(
+			overseer_recv(virtual_overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::ValidationCodeHash( + id, + assumption, + tx, + ), + )) => { + assert_eq!(hash, activated_hash); + assert_eq!(id, para_id); + assert_eq!(assumption, expected_occupied_core_assumption); + + let _ = tx.send(Ok(Some(validation_code_hash))); + } + ); + + for _ in 0..cores_assigned { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation{ + candidate_receipt, + parent_head_data_hash, + .. + }) => { + assert_eq!(parent_head_data_hash, parent_head.hash()); + assert_eq!(candidate_receipt.descriptor().persisted_validation_data_hash, pvd.hash()); + assert_eq!(candidate_receipt.descriptor().para_head, dummy_head_data().hash()); + assert_eq!(candidate_receipt.descriptor().validation_code_hash, validation_code_hash); + } + ); + } + } +} diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index 2a5b6198b9a8..ced7706c40a2 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -10,7 +10,7 @@ description = "Approval Voting Subsystem of the Polkadot node" workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } gum = { package = "tracing-gum", path = "../../gum" } @@ -41,7 +41,7 @@ rand_chacha = { version = "0.3.1" } rand = "0.8.5" [dev-dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" parking_lot = "0.12.1" sp-keyring = { path = "../../../../substrate/primitives/keyring" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } @@ -52,4 +52,4 @@ assert_matches = "1.4.0" kvdb-memorydb = "0.13.0" test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } log = { workspace = true, default-features = true } -env_logger = "0.9.0" +env_logger = "0.11" diff --git a/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs index b979cb7ef45f..b0966ad01f7b 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs @@ -254,7 +254,7 @@ fn canonicalize_works() { // -> B1 -> C1 -> D1 // A -> B2 -> C2 -> D2 // - // We'll canonicalize C1. Everytning except D1 should disappear. + // We'll canonicalize C1. Everything except D1 should disappear. // // Candidates: // Cand1 in B2 diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs index df6e4754dbd6..1081d79884f7 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs @@ -79,7 +79,7 @@ pub fn v1_to_latest(db: Arc, config: Config) -> Result<()> { block.candidates().iter().enumerate() { // Loading the candidate will also perform the conversion to the updated format and - // return that represantation. + // return that representation. if let Some(candidate_entry) = backend .load_candidate_entry_v1(&candidate_hash, candidate_index as CandidateIndex) .map_err(|e| Error::InternalError(e))? 
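As context for the migration hunk above: loading a candidate through the versioned backend both decodes the old on-disk format and converts it to the latest in-memory representation, so a migration pass is just load-and-rewrite. A minimal sketch of that shape, with hypothetical `V1Entry`/`LatestEntry` types standing in for the versioned candidate entries:

    // Hypothetical types; the real code goes through `load_candidate_entry_v1`,
    // which performs this conversion internally.
    struct V1Entry { approvals: u32 }
    struct LatestEntry { approvals: u64 }

    impl From<V1Entry> for LatestEntry {
        fn from(old: V1Entry) -> Self {
            // Conversion happens at load time, so callers only ever see `LatestEntry`.
            LatestEntry { approvals: old.approvals as u64 }
        }
    }

    // Migration is then a load-convert-rewrite loop over all entries.
    fn migrate(entries: Vec<V1Entry>) -> Vec<LatestEntry> {
        entries.into_iter().map(LatestEntry::from).collect()
    }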
diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs index 6021b44c2765..5fa915add416 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs @@ -269,7 +269,7 @@ fn canonicalize_works() { // -> B1 -> C1 -> D1 // A -> B2 -> C2 -> D2 // - // We'll canonicalize C1. Everytning except D1 should disappear. + // We'll canonicalize C1. Everything except D1 should disappear. // // Candidates: // Cand1 in B2 diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs index ad5e89ef3de8..d1e7ee08225b 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs @@ -60,7 +60,7 @@ pub fn v2_to_latest(db: Arc, config: Config) -> Result<()> { block.candidates().iter().enumerate() { // Loading the candidate will also perform the conversion to the updated format and - // return that represantation. + // return that representation. if let Some(candidate_entry) = backend .load_candidate_entry_v2(&candidate_hash, candidate_index as CandidateIndex) .map_err(|e| Error::InternalError(e))? @@ -104,7 +104,7 @@ pub fn v1_to_latest_sanity_check( for block in all_blocks { for (_core_index, candidate_hash) in block.candidates() { // Loading the candidate will also perform the conversion to the updated format and - // return that represantation. + // return that representation. if let Some(candidate_entry) = backend.load_candidate_entry(&candidate_hash).unwrap() { candidates.insert(candidate_entry.candidate.hash()); } diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs index 08c65461bca8..7c0cf9d4f7da 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs @@ -264,7 +264,7 @@ fn canonicalize_works() { // -> B1 -> C1 -> D1 // A -> B2 -> C2 -> D2 // - // We'll canonicalize C1. Everytning except D1 should disappear. + // We'll canonicalize C1. Everything except D1 should disappear. // // Candidates: // Cand1 in B2 diff --git a/polkadot/node/core/approval-voting/src/criteria.rs b/polkadot/node/core/approval-voting/src/criteria.rs index 1ebea2641b62..57c0ac272dc5 100644 --- a/polkadot/node/core/approval-voting/src/criteria.rs +++ b/polkadot/node/core/approval-voting/src/criteria.rs @@ -148,7 +148,7 @@ fn relay_vrf_modulo_cores( generate_samples(rand_chacha, num_samples as usize, max_cores as usize) } -/// Generates `num_sumples` randomly from (0..max_cores) range +/// Generates `num_samples` randomly from (0..max_cores) range /// /// Note! The algorithm can't change because validators on the other /// side won't be able to check the assignments until they update. 
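The `generate_samples` doc-comment fixed above describes drawing `num_samples` values from the `(0..max_cores)` range with a deterministic RNG. A rough, illustrative sketch only (the real, consensus-critical algorithm lives in `criteria.rs` and must not change, as the note below it says):

    use rand::{Rng, SeedableRng};
    use rand_chacha::ChaCha20Rng;

    // Illustrative: a seeded ChaCha RNG drawing `num_samples` values from 0..max_cores.
    // The actual assignment criteria derive the RNG from a VRF and use a fixed algorithm.
    fn sample_cores(seed: [u8; 32], num_samples: usize, max_cores: u32) -> Vec<u32> {
        let mut rng = ChaCha20Rng::from_seed(seed);
        (0..num_samples).map(|_| rng.gen_range(0..max_cores)).collect()
    }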
diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs index 7a56e9fd1129..f4be42a48450 100644 --- a/polkadot/node/core/approval-voting/src/import.rs +++ b/polkadot/node/core/approval-voting/src/import.rs @@ -45,8 +45,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::{determine_new_blocks, runtime::RuntimeInfo}; use polkadot_primitives::{ - vstaging::node_features, BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, - ConsensusLog, CoreIndex, GroupIndex, Hash, Header, SessionIndex, + node_features, BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, ConsensusLog, + CoreIndex, GroupIndex, Hash, Header, SessionIndex, }; use sc_keystore::LocalKeystore; use sp_consensus_slots::Slot; @@ -91,7 +91,7 @@ enum ImportedBlockInfoError { #[error(transparent)] RuntimeError(RuntimeApiError), - #[error("future cancalled while requesting {0}")] + #[error("future cancelled while requesting {0}")] FutureCancelled(&'static str, futures::channel::oneshot::Canceled), #[error(transparent)] @@ -619,8 +619,8 @@ pub(crate) mod tests { use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_node_subsystem_util::database::Database; use polkadot_primitives::{ - vstaging::{node_features::FeatureIndex, NodeFeatures}, - ExecutorParams, Id as ParaId, IndexedVec, SessionInfo, ValidatorId, ValidatorIndex, + node_features::FeatureIndex, ExecutorParams, Id as ParaId, IndexedVec, NodeFeatures, + SessionInfo, ValidatorId, ValidatorIndex, }; pub(crate) use sp_consensus_babe::{ digests::{CompatibleDigestItem, PreDigest, SecondaryVRFPreDigest}, diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 3ab70a2c9da8..9e275b9db98e 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -53,10 +53,10 @@ use polkadot_node_subsystem_util::{ TimeoutExt, }; use polkadot_primitives::{ - vstaging::{ApprovalVoteMultipleCandidates, ApprovalVotingParams}, - BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, DisputeStatement, ExecutorParams, - GroupIndex, Hash, PvfExecKind, SessionIndex, SessionInfo, ValidDisputeStatementKind, - ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, + ApprovalVoteMultipleCandidates, ApprovalVotingParams, BlockNumber, CandidateHash, + CandidateIndex, CandidateReceipt, CoreIndex, DisputeStatement, ExecutorParams, GroupIndex, + Hash, PvfExecKind, SessionIndex, SessionInfo, ValidDisputeStatementKind, ValidatorId, + ValidatorIndex, ValidatorPair, ValidatorSignature, }; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair; @@ -1296,6 +1296,21 @@ fn cores_to_candidate_indices( CandidateBitfield::try_from(candidate_indices) } +// Returns the claimed core bitfield from the assignment cert and the core index +// from the block entry. +fn get_core_indices_on_startup( + assignment: &AssignmentCertKindV2, + block_entry_core_index: CoreIndex, +) -> CoreBitfield { + match &assignment { + AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } => core_bitfield.clone(), + AssignmentCertKindV2::RelayVRFModulo { sample: _ } => + CoreBitfield::try_from(vec![block_entry_core_index]).expect("Not an empty vec; qed"), + AssignmentCertKindV2::RelayVRFDelay { core_index } => + CoreBitfield::try_from(vec![*core_index]).expect("Not an empty vec; qed"), + } +} + // Returns the claimed core bitfield from the assignment cert, the candidate hash and a // `BlockEntry`. 
Can fail only for VRF Delay assignments for which we cannot find the candidate hash // in the block entry which indicates a bug or corrupted storage. @@ -1366,7 +1381,7 @@ async fn distribution_messages_for_activation( session: block_entry.session(), }); let mut signatures_queued = HashSet::new(); - for (_, candidate_hash) in block_entry.candidates() { + for (core_index, candidate_hash) in block_entry.candidates() { let _candidate_span = distribution_message_span.child("candidate").with_candidate(*candidate_hash); let candidate_entry = match db.load_candidate_entry(&candidate_hash)? { @@ -1388,152 +1403,121 @@ async fn distribution_messages_for_activation( match approval_entry.local_statements() { (None, None) | (None, Some(_)) => {}, // second is impossible case. (Some(assignment), None) => { - if let Some(claimed_core_indices) = get_assignment_core_indices( - &assignment.cert().kind, - &candidate_hash, - &block_entry, - ) { - if block_entry.has_candidates_pending_signature() { - delayed_approvals_timers.maybe_arm_timer( - state.clock.tick_now(), - state.clock.as_ref(), - block_entry.block_hash(), - assignment.validator_index(), - ) - } + let claimed_core_indices = + get_core_indices_on_startup(&assignment.cert().kind, *core_index); + + if block_entry.has_candidates_pending_signature() { + delayed_approvals_timers.maybe_arm_timer( + state.clock.tick_now(), + state.clock.as_ref(), + block_entry.block_hash(), + assignment.validator_index(), + ) + } - match cores_to_candidate_indices( - &claimed_core_indices, - &block_entry, - ) { - Ok(bitfield) => { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?candidate_entry.candidate_receipt().hash(), - ?block_hash, - "Discovered, triggered assignment, not approved yet", - ); - - let indirect_cert = IndirectAssignmentCertV2 { - block_hash, - validator: assignment.validator_index(), - cert: assignment.cert().clone(), - }; - messages.push( - ApprovalDistributionMessage::DistributeAssignment( - indirect_cert.clone(), - bitfield.clone(), - ), - ); - - if !block_entry - .candidate_is_pending_signature(*candidate_hash) - { - let ExtendedSessionInfo { ref executor_params, .. } = - match get_extended_session_info( - session_info_provider, - ctx.sender(), - block_entry.block_hash(), - block_entry.session(), - ) - .await - { - Some(i) => i, - None => continue, - }; - - actions.push(Action::LaunchApproval { - claimed_candidate_indices: bitfield, - candidate_hash: candidate_entry - .candidate_receipt() - .hash(), - indirect_cert, - assignment_tranche: assignment.tranche(), - relay_block_hash: block_hash, - session: block_entry.session(), - executor_params: executor_params.clone(), - candidate: candidate_entry - .candidate_receipt() - .clone(), - backing_group: approval_entry.backing_group(), - distribute_assignment: false, - }); - } - }, - Err(err) => { - // Should never happen. If we fail here it means the - // assignment is null (no cores claimed). 
- gum::warn!( - target: LOG_TARGET, - ?block_hash, - ?candidate_hash, - ?err, - "Failed to create assignment bitfield", - ); - }, - } - } else { - gum::warn!( - target: LOG_TARGET, - ?block_hash, - ?candidate_hash, - "Cannot get assignment claimed core indices", - ); + match cores_to_candidate_indices(&claimed_core_indices, &block_entry) { + Ok(bitfield) => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate_entry.candidate_receipt().hash(), + ?block_hash, + "Discovered, triggered assignment, not approved yet", + ); + + let indirect_cert = IndirectAssignmentCertV2 { + block_hash, + validator: assignment.validator_index(), + cert: assignment.cert().clone(), + }; + messages.push( + ApprovalDistributionMessage::DistributeAssignment( + indirect_cert.clone(), + bitfield.clone(), + ), + ); + + if !block_entry.candidate_is_pending_signature(*candidate_hash) + { + let ExtendedSessionInfo { ref executor_params, .. } = + match get_extended_session_info( + session_info_provider, + ctx.sender(), + block_entry.block_hash(), + block_entry.session(), + ) + .await + { + Some(i) => i, + None => continue, + }; + + actions.push(Action::LaunchApproval { + claimed_candidate_indices: bitfield, + candidate_hash: candidate_entry + .candidate_receipt() + .hash(), + indirect_cert, + assignment_tranche: assignment.tranche(), + relay_block_hash: block_hash, + session: block_entry.session(), + executor_params: executor_params.clone(), + candidate: candidate_entry.candidate_receipt().clone(), + backing_group: approval_entry.backing_group(), + distribute_assignment: false, + }); + } + }, + Err(err) => { + // Should never happen. If we fail here it means the + // assignment is null (no cores claimed). + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + ?err, + "Failed to create assignment bitfield", + ); + }, } }, (Some(assignment), Some(approval_sig)) => { - if let Some(claimed_core_indices) = get_assignment_core_indices( - &assignment.cert().kind, - &candidate_hash, - &block_entry, - ) { - match cores_to_candidate_indices( - &claimed_core_indices, - &block_entry, - ) { - Ok(bitfield) => messages.push( - ApprovalDistributionMessage::DistributeAssignment( - IndirectAssignmentCertV2 { - block_hash, - validator: assignment.validator_index(), - cert: assignment.cert().clone(), - }, - bitfield, - ), - ), - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?block_hash, - ?candidate_hash, - ?err, - "Failed to create assignment bitfield", - ); - // If we didn't send assignment, we don't send approval. 
- continue - }, - } - if signatures_queued - .insert(approval_sig.signed_candidates_indices.clone()) - { - messages.push(ApprovalDistributionMessage::DistributeApproval( - IndirectSignedApprovalVoteV2 { + let claimed_core_indices = + get_core_indices_on_startup(&assignment.cert().kind, *core_index); + match cores_to_candidate_indices(&claimed_core_indices, &block_entry) { + Ok(bitfield) => messages.push( + ApprovalDistributionMessage::DistributeAssignment( + IndirectAssignmentCertV2 { block_hash, - candidate_indices: approval_sig - .signed_candidates_indices, validator: assignment.validator_index(), - signature: approval_sig.signature, + cert: assignment.cert().clone(), }, - )) - }; - } else { - gum::warn!( - target: LOG_TARGET, - ?block_hash, - ?candidate_hash, - "Cannot get assignment claimed core indices", - ); + bitfield, + ), + ), + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + ?err, + "Failed to create assignment bitfield", + ); + // If we didn't send assignment, we don't send approval. + continue + }, } + if signatures_queued + .insert(approval_sig.signed_candidates_indices.clone()) + { + messages.push(ApprovalDistributionMessage::DistributeApproval( + IndirectSignedApprovalVoteV2 { + block_hash, + candidate_indices: approval_sig.signed_candidates_indices, + validator: assignment.validator_index(), + signature: approval_sig.signature, + }, + )) + }; }, } }, @@ -1865,7 +1849,7 @@ async fn get_approval_signatures_for_candidate( gum::trace!( target: LOG_TARGET, ?candidate_hash, - "Spawning task for fetching sinatures from approval-distribution" + "Spawning task for fetching signatures from approval-distribution" ); ctx.spawn("get-approval-signatures", Box::pin(get_approvals)) } diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs index b924a1b52ccf..6eeb99cb99ff 100644 --- a/polkadot/node/core/approval-voting/src/persisted_entries.rs +++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs @@ -454,7 +454,7 @@ pub struct BlockEntry { slot: Slot, relay_vrf_story: RelayVRFStory, // The candidates included as-of this block and the index of the core they are - // leaving. Sorted ascending by core index. + // leaving. candidates: Vec<(CoreIndex, CandidateHash)>, // A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`. 
// The i'th bit is `true` iff the candidate has been approved in the context of this diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index c9053232a4c8..f7bbbca4b8a1 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -37,8 +37,8 @@ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_overseer::HeadSupportsParachains; use polkadot_primitives::{ - vstaging::NodeFeatures, ApprovalVote, CandidateCommitments, CandidateEvent, CoreIndex, - GroupIndex, Header, Id as ParaId, IndexedVec, ValidationCode, ValidatorSignature, + ApprovalVote, CandidateCommitments, CandidateEvent, CoreIndex, GroupIndex, Header, + Id as ParaId, IndexedVec, NodeFeatures, ValidationCode, ValidatorSignature, }; use std::time::Duration; @@ -2479,6 +2479,173 @@ fn subsystem_import_checked_approval_sets_one_block_bit_at_a_time() { }); } +// See https://github.com/paritytech/polkadot-sdk/issues/3826 +#[test] +fn inclusion_events_can_be_unordered_by_core_index() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + for core in 0..3 { + let _ = assignments.insert( + CoreIndex(core), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2( + AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), + }, + ), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + } + assignments + }, + |_| Ok(0), + )); + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { + mut virtual_overseer, + clock, + sync_oracle_handle: _sync_oracle_handle, + .. 
+ } = test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + + let candidate_receipt0 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(0_u32); + receipt + }; + let candidate_receipt1 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(1_u32); + receipt + }; + let candidate_receipt2 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(2_u32); + receipt + }; + let candidate_index0 = 0; + let candidate_index1 = 1; + let candidate_index2 = 2; + + let validator0 = ValidatorIndex(0); + let validator1 = ValidatorIndex(1); + let validator2 = ValidatorIndex(2); + let validator3 = ValidatorIndex(3); + + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Eve, + ]; + let session_info = SessionInfo { + validator_groups: IndexedVec::>::from(vec![ + vec![validator0, validator1], + vec![validator2], + vec![validator3], + ]), + needed_approvals: 1, + zeroth_delay_tranche_width: 1, + relay_vrf_modulo_samples: 1, + n_delay_tranches: 1, + no_show_slots: 1, + ..session_info(&validators) + }; + + ChainBuilder::new() + .add_block( + block_hash, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot: Slot::from(0), + candidates: Some(vec![ + (candidate_receipt0.clone(), CoreIndex(2), GroupIndex(2)), + (candidate_receipt1.clone(), CoreIndex(1), GroupIndex(0)), + (candidate_receipt2.clone(), CoreIndex(0), GroupIndex(1)), + ]), + session_info: Some(session_info), + end_syncing: true, + }, + ) + .build(&mut virtual_overseer) + .await; + + assert_eq!(clock.inner.lock().next_wakeup().unwrap(), 2); + clock.inner.lock().wakeup_all(100); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + futures_timer::Delay::new(Duration::from_millis(100)).await; + + // Assignment is distributed only once from `approval-voting` + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + c_indices, + )) => { + assert_eq!(c_indices, vec![candidate_index0, candidate_index1, candidate_index2].try_into().unwrap()); + } + ); + + // Candidate 0 + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + // Candidate 1 + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + // Candidate 2 + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + // Check if assignment was triggered for candidate 0. + let candidate_entry = + store.load_candidate_entry(&candidate_receipt0.hash()).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + // Check if assignment was triggered for candidate 1. + let candidate_entry = + store.load_candidate_entry(&candidate_receipt1.hash()).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + // Check if assignment was triggered for candidate 2. 
+ let candidate_entry = + store.load_candidate_entry(&candidate_receipt2.hash()).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + virtual_overseer + }); +} + fn approved_ancestor_test( skip_approval: impl Fn(BlockNumber) -> bool, approved_height: BlockNumber, @@ -3154,7 +3321,7 @@ where // starting configuration. The relevant ticks (all scheduled wakeups) are printed after no further // ticks are scheduled. To create a valid test, a prefix of the relevant ticks should be included // in the final test configuration, ending at the tick with the desired inputs to -// should_trigger_assignemnt. +// should_trigger_assignment. async fn step_until_done(clock: &MockClock) { let mut relevant_ticks = Vec::new(); loop { @@ -3837,7 +4004,7 @@ fn test_approval_is_sent_on_max_approval_coalesce_count() { async fn handle_approval_on_max_coalesce_count( virtual_overseer: &mut VirtualOverseer, - candidate_indicies: Vec, + candidate_indices: Vec, ) { assert_matches!( overseer_recv(virtual_overseer).await, @@ -3845,16 +4012,16 @@ async fn handle_approval_on_max_coalesce_count( _, c_indices, )) => { - assert_eq!(TryInto::::try_into(candidate_indicies.clone()).unwrap(), c_indices); + assert_eq!(TryInto::::try_into(candidate_indices.clone()).unwrap(), c_indices); } ); - for _ in &candidate_indicies { + for _ in &candidate_indices { recover_available_data(virtual_overseer).await; fetch_validation_code(virtual_overseer).await; } - for _ in &candidate_indicies { + for _ in &candidate_indices { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive{exec_kind, response_sender, ..}) if exec_kind == PvfExecKind::Approval => { @@ -3885,7 +4052,7 @@ async fn handle_approval_on_max_coalesce_count( assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(vote)) => { - assert_eq!(TryInto::::try_into(candidate_indicies).unwrap(), vote.candidate_indices); + assert_eq!(TryInto::::try_into(candidate_indices).unwrap(), vote.candidate_indices); } ); @@ -3895,7 +4062,7 @@ async fn handle_approval_on_max_coalesce_count( async fn handle_approval_on_max_wait_time( virtual_overseer: &mut VirtualOverseer, - candidate_indicies: Vec, + candidate_indices: Vec, clock: Box, ) { const TICK_NOW_BEGIN: u64 = 1; @@ -3909,16 +4076,16 @@ async fn handle_approval_on_max_wait_time( _, c_indices, )) => { - assert_eq!(TryInto::::try_into(candidate_indicies.clone()).unwrap(), c_indices); + assert_eq!(TryInto::::try_into(candidate_indices.clone()).unwrap(), c_indices); } ); - for _ in &candidate_indicies { + for _ in &candidate_indices { recover_available_data(virtual_overseer).await; fetch_validation_code(virtual_overseer).await; } - for _ in &candidate_indicies { + for _ in &candidate_indices { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive{exec_kind, response_sender, ..}) if exec_kind == PvfExecKind::Approval => { @@ -3978,7 +4145,7 @@ async fn handle_approval_on_max_wait_time( assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(vote)) => { - assert_eq!(TryInto::::try_into(candidate_indicies).unwrap(), vote.candidate_indices); + assert_eq!(TryInto::::try_into(candidate_indices).unwrap(), 
vote.candidate_indices); } ); @@ -4319,7 +4486,7 @@ fn subsystem_relaunches_approval_work_on_restart() { virtual_overseer }); - // Restart a new approval voting subsystem with the same database and major syncing true untill + // Restart a new approval voting subsystem with the same database and major syncing true until // the last leaf. let config = HarnessConfigBuilder::default().backend(store_clone).major_syncing(true).build(); diff --git a/polkadot/node/core/approval-voting/src/time.rs b/polkadot/node/core/approval-voting/src/time.rs index 99dfbe07678f..5c3e7e85a17a 100644 --- a/polkadot/node/core/approval-voting/src/time.rs +++ b/polkadot/node/core/approval-voting/src/time.rs @@ -126,13 +126,13 @@ impl DelayedApprovalTimer { /// no additional timer is started. pub(crate) fn maybe_arm_timer( &mut self, - wait_untill: Tick, + wait_until: Tick, clock: &dyn Clock, block_hash: Hash, validator_index: ValidatorIndex, ) { if self.blocks.insert(block_hash) { - let clock_wait = clock.wait(wait_untill); + let clock_wait = clock.wait(wait_until); self.timers.push(Box::pin(async move { clock_wait.await; (block_hash, validator_index) diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index 05212da74798..bc9b979228a1 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" kvdb = "0.13.0" thiserror = { workspace = true } @@ -29,7 +29,7 @@ polkadot-node-jaeger = { path = "../../jaeger" } [dev-dependencies] log = { workspace = true, default-features = true } -env_logger = "0.9.0" +env_logger = "0.11" assert_matches = "1.4.0" kvdb-memorydb = "0.13.0" diff --git a/polkadot/node/core/av-store/src/lib.rs b/polkadot/node/core/av-store/src/lib.rs index ef7dcecac075..68db4686a974 100644 --- a/polkadot/node/core/av-store/src/lib.rs +++ b/polkadot/node/core/av-store/src/lib.rs @@ -1218,7 +1218,7 @@ fn process_message( // tx channel is dropped and that error is caught by the caller subsystem. // // We bubble up the specific error here so `av-store` logs still tell what - // happend. + // happened. return Err(e.into()) }, } diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index d0c1f9aa4832..26fa54470fbd 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -10,7 +10,7 @@ description = "The Candidate Backing Subsystem. 
Tracks parachain candidates that workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" sp-keystore = { path = "../../../../substrate/primitives/keystore" } polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } @@ -30,7 +30,7 @@ sp-application-crypto = { path = "../../../../substrate/primitives/application-c sp-keyring = { path = "../../../../substrate/primitives/keyring" } sc-keystore = { path = "../../../../substrate/client/keystore" } sp-tracing = { path = "../../../../substrate/primitives/tracing" } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } assert_matches = "1.4.0" rstest = "0.18.2" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs index 64955a393962..52684f3fe306 100644 --- a/polkadot/node/core/backing/src/error.rs +++ b/polkadot/node/core/backing/src/error.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +use std::collections::HashMap; + use fatality::Nested; use futures::channel::{mpsc, oneshot}; @@ -24,7 +26,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{runtime, Error as UtilError}; use polkadot_primitives::{BackedCandidate, ValidationCodeHash}; -use crate::LOG_TARGET; +use crate::{ParaId, LOG_TARGET}; pub type Result = std::result::Result; pub type FatalResult = std::result::Result; @@ -55,7 +57,7 @@ pub enum Error { InvalidSignature, #[error("Failed to send candidates {0:?}")] - Send(Vec), + Send(HashMap>), #[error("FetchPoV failed")] FetchPoV, diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 69bf2e956a0f..23acb0450944 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -105,12 +105,11 @@ use polkadot_node_subsystem_util::{ Validator, }; use polkadot_primitives::{ - vstaging::{node_features::FeatureIndex, NodeFeatures}, - BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, - CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, GroupRotationInfo, - Hash, Id as ParaId, IndexedVec, PersistedValidationData, PvfExecKind, SessionIndex, - SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, - ValidityAttestation, + node_features::FeatureIndex, BackedCandidate, CandidateCommitments, CandidateHash, + CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, + GroupRotationInfo, Hash, Id as ParaId, IndexedVec, NodeFeatures, PersistedValidationData, + PvfExecKind, SessionIndex, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, + ValidatorSignature, ValidityAttestation, }; use sp_keystore::KeystorePtr; use statement_table::{ @@ -292,7 +291,7 @@ struct State { /// Cache the per-session Validator->Group mapping. validator_to_group_cache: LruMap>>>, - /// A cloneable sender which is dispatched to background candidate validation tasks to inform + /// A clonable sender which is dispatched to background candidate validation tasks to inform /// the main task of the result. background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, /// The handle to the keystore used for signing. 
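The `@@ -911` hunk below fixes a subtle bug in `handle_active_leaves_update`: with `if let Some(response) = membership_answers.next().await`, only the first answer from the stream was processed; `while let` drains every answer. A generic sketch of the difference (not the subsystem code):

    use futures::{stream, StreamExt};

    async fn drain_all() {
        let mut answers = stream::iter([1, 2, 3]);
        // `if let` here would observe only `1`; `while let` visits every item.
        while let Some(answer) = answers.next().await {
            println!("got {answer}");
        }
    }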
@@ -911,7 +910,7 @@ async fn handle_active_leaves_update( } let mut seconded_at_depth = HashMap::new(); - if let Some(response) = membership_answers.next().await { + while let Some(response) = membership_answers.next().await { match response { Err(oneshot::Canceled) => { gum::warn!( @@ -2231,15 +2230,16 @@ async fn handle_statement_message( fn handle_get_backed_candidates_message( state: &State, - requested_candidates: Vec<(CandidateHash, Hash)>, - tx: oneshot::Sender>, + requested_candidates: HashMap>, + tx: oneshot::Sender>>, metrics: &Metrics, ) -> Result<(), Error> { let _timer = metrics.time_get_backed_candidates(); - let backed = requested_candidates - .into_iter() - .filter_map(|(candidate_hash, relay_parent)| { + let mut backed = HashMap::with_capacity(requested_candidates.len()); + + for (para_id, para_candidates) in requested_candidates { + for (candidate_hash, relay_parent) in para_candidates.iter() { let rp_state = match state.per_relay_parent.get(&relay_parent) { Some(rp_state) => rp_state, None => { @@ -2249,13 +2249,13 @@ fn handle_get_backed_candidates_message( ?candidate_hash, "Requested candidate's relay parent is out of view", ); - return None + break }, }; - rp_state + let maybe_backed_candidate = rp_state .table .attested_candidate( - &candidate_hash, + candidate_hash, &rp_state.table_context, rp_state.minimum_backing_votes, ) @@ -2265,9 +2265,18 @@ fn handle_get_backed_candidates_message( &rp_state.table_context, rp_state.inject_core_index, ) - }) - }) - .collect(); + }); + + if let Some(backed_candidate) = maybe_backed_candidate { + backed + .entry(para_id) + .or_insert_with(|| Vec::with_capacity(para_candidates.len())) + .push(backed_candidate); + } else { + break + } + } + } tx.send(backed).map_err(|data| Error::Send(data))?; Ok(()) diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index e3cc5727435a..d1969e656db6 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -33,8 +33,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - vstaging::node_features, CandidateDescriptor, GroupRotationInfo, HeadData, - PersistedValidationData, PvfExecKind, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, + node_features, CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, + PvfExecKind, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, }; use rstest::rstest; use sp_application_crypto::AppCrypto; @@ -663,13 +663,19 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( - vec![(candidate_a_hash, test_state.relay_parent)], + std::iter::once(( + test_state.chain_ids[0], + vec![(candidate_a_hash, test_state.relay_parent)], + )) + .collect(), tx, ); virtual_overseer.send(FromOrchestra::Communication { msg }).await; - let candidates = rx.await.unwrap(); + let mut candidates = rx.await.unwrap(); + assert_eq!(1, candidates.len()); + let candidates = candidates.remove(&test_state.chain_ids[0]).unwrap(); assert_eq!(1, candidates.len()); assert_eq!(candidates[0].validity_votes().len(), 3); @@ -695,6 +701,323 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { }); } +#[test] +fn get_backed_candidate_preserves_order() { + let mut test_state = TestState::default(); + test_state + .node_features + .resize((node_features::FeatureIndex::ElasticScalingMVP as u8 + 1) as usize, false); + 
test_state + .node_features + .set(node_features::FeatureIndex::ElasticScalingMVP as u8 as usize, true); + + // Set a single validator as the first validator group. It simplifies the test. + test_state.validator_groups.0[0] = vec![ValidatorIndex(2)]; + // Add another validator group for the third core. + test_state.validator_groups.0.push(vec![ValidatorIndex(3)]); + // Assign the second core to the same para as the first one. + test_state.availability_cores[1] = + CoreState::Scheduled(ScheduledCore { para_id: test_state.chain_ids[0], collator: None }); + // Add another availability core for paraid 2. + test_state.availability_cores.push(CoreState::Scheduled(ScheduledCore { + para_id: test_state.chain_ids[1], + collator: None, + })); + + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + test_startup(&mut virtual_overseer, &test_state).await; + + let pov_a = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pov_b = PoV { block_data: BlockData(vec![3, 4, 5]) }; + let pov_c = PoV { block_data: BlockData(vec![5, 6, 7]) }; + let validation_code_ab = ValidationCode(vec![1, 2, 3]); + let validation_code_c = ValidationCode(vec![4, 5, 6]); + + let parent_head_data_a = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + let parent_head_data_b = { + let mut head = parent_head_data_a.clone(); + head.0[0] = 98; + head + }; + let output_head_data_b = { + let mut head = parent_head_data_a.clone(); + head.0[0] = 99; + head + }; + let parent_head_data_c = test_state.head_data.get(&test_state.chain_ids[1]).unwrap(); + let output_head_data_c = { + let mut head = parent_head_data_c.clone(); + head.0[0] = 97; + head + }; + + let pvd_a = PersistedValidationData { + parent_head: parent_head_data_a.clone(), + relay_parent_number: 0_u32.into(), + max_pov_size: 1024, + relay_parent_storage_root: dummy_hash(), + }; + let pvd_b = PersistedValidationData { + parent_head: parent_head_data_b.clone(), + relay_parent_number: 0_u32.into(), + max_pov_size: 1024, + relay_parent_storage_root: dummy_hash(), + }; + let pvd_c = PersistedValidationData { + parent_head: parent_head_data_c.clone(), + relay_parent_number: 0_u32.into(), + max_pov_size: 1024, + relay_parent_storage_root: dummy_hash(), + }; + + let candidate_a = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash: pov_a.hash(), + head_data: parent_head_data_b.clone(), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + validation_code: validation_code_ab.0.clone(), + persisted_validation_data_hash: pvd_a.hash(), + } + .build(); + let candidate_b = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash: pov_b.hash(), + head_data: output_head_data_b.clone(), + erasure_root: make_erasure_root(&test_state, pov_b.clone(), pvd_b.clone()), + validation_code: validation_code_ab.0.clone(), + persisted_validation_data_hash: pvd_b.hash(), + } + .build(); + let candidate_c = TestCandidateBuilder { + para_id: test_state.chain_ids[1], + relay_parent: test_state.relay_parent, + pov_hash: pov_c.hash(), + head_data: output_head_data_c.clone(), + erasure_root: make_erasure_root(&test_state, pov_b.clone(), pvd_c.clone()), + validation_code: validation_code_c.0.clone(), + persisted_validation_data_hash: pvd_c.hash(), + } + .build(); + let candidate_a_hash = candidate_a.hash(); + let candidate_b_hash = candidate_b.hash(); + let candidate_c_hash = candidate_c.hash(); + + // Back a chain of two candidates for 
the first paraid. Back one candidate for the second + // paraid. + for (candidate, pvd, validator_index) in [ + (candidate_a, pvd_a, ValidatorIndex(2)), + (candidate_b, pvd_b, ValidatorIndex(1)), + (candidate_c, pvd_c, ValidatorIndex(3)), + ] { + let public = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[validator_index.0 as usize].to_seed()), + ) + .expect("Insert key into keystore"); + + let signed = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), + &test_state.signing_context, + validator_index, + &public.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let statement = + CandidateBackingMessage::Statement(test_state.relay_parent, signed.clone()); + + virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(candidate_receipt) + ) + ) => { + assert_eq!(candidate_receipt, candidate.to_plain()); + } + ); + } + + // Happy case, all candidates should be present. + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + [ + ( + test_state.chain_ids[0], + vec![ + (candidate_a_hash, test_state.relay_parent), + (candidate_b_hash, test_state.relay_parent), + ], + ), + (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), + ] + .into_iter() + .collect(), + tx, + ); + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + let mut candidates = rx.await.unwrap(); + assert_eq!(2, candidates.len()); + assert_eq!( + candidates + .remove(&test_state.chain_ids[0]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::>(), + vec![candidate_a_hash, candidate_b_hash] + ); + assert_eq!( + candidates + .remove(&test_state.chain_ids[1]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::>(), + vec![candidate_c_hash] + ); + + // The first candidate of the first para is invalid (we supply the wrong relay parent or a + // wrong candidate hash). No candidates should be returned for paraid 1. ParaId 2 should be + // fine. + for candidates in [ + vec![ + (candidate_a_hash, Hash::repeat_byte(9)), + (candidate_b_hash, test_state.relay_parent), + ], + vec![ + (CandidateHash(Hash::repeat_byte(9)), test_state.relay_parent), + (candidate_b_hash, test_state.relay_parent), + ], + ] { + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + [ + (test_state.chain_ids[0], candidates), + (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), + ] + .into_iter() + .collect(), + tx, + ); + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + let mut candidates = rx.await.unwrap(); + assert_eq!(candidates.len(), 1); + + assert!(candidates.remove(&test_state.chain_ids[0]).is_none()); + assert_eq!( + candidates + .remove(&test_state.chain_ids[1]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::>(), + vec![candidate_c_hash] + ); + } + + // The second candidate of the first para is invalid (we supply the wrong relay parent or a + // wrong candidate hash). The first candidate of the first para should still be present. + // ParaId 2 is fine. 
+ for candidates in [ + vec![ + (candidate_a_hash, test_state.relay_parent), + (candidate_b_hash, Hash::repeat_byte(9)), + ], + vec![ + (candidate_a_hash, test_state.relay_parent), + (CandidateHash(Hash::repeat_byte(9)), test_state.relay_parent), + ], + ] { + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + [ + (test_state.chain_ids[0], candidates), + (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), + ] + .into_iter() + .collect(), + tx, + ); + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + let mut candidates = rx.await.unwrap(); + assert_eq!(2, candidates.len()); + assert_eq!( + candidates + .remove(&test_state.chain_ids[0]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::>(), + vec![candidate_a_hash] + ); + assert_eq!( + candidates + .remove(&test_state.chain_ids[1]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::>(), + vec![candidate_c_hash] + ); + } + + // Both candidates of para id 1 are invalid (we supply the wrong relay parent or a wrong + // candidate hash). No candidates should be returned for para id 1. Para Id 2 is fine. + for candidates in [ + vec![ + (CandidateHash(Hash::repeat_byte(9)), test_state.relay_parent), + (CandidateHash(Hash::repeat_byte(10)), test_state.relay_parent), + ], + vec![ + (candidate_a_hash, Hash::repeat_byte(9)), + (candidate_b_hash, Hash::repeat_byte(10)), + ], + ] { + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + [ + (test_state.chain_ids[0], candidates), + (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), + ] + .into_iter() + .collect(), + tx, + ); + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + let mut candidates = rx.await.unwrap(); + assert_eq!(candidates.len(), 1); + + assert!(candidates.remove(&test_state.chain_ids[0]).is_none()); + assert_eq!( + candidates + .remove(&test_state.chain_ids[1]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::>(), + vec![candidate_c_hash] + ); + } + + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::stop_work(test_state.relay_parent), + ))) + .await; + virtual_overseer + }); +} + #[test] fn extract_core_index_from_statement_works() { let test_state = TestState::default(); @@ -950,13 +1273,19 @@ fn backing_works_while_validation_ongoing() { let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( - vec![(candidate_a.hash(), test_state.relay_parent)], + std::iter::once(( + test_state.chain_ids[0], + vec![(candidate_a.hash(), test_state.relay_parent)], + )) + .collect(), tx, ); virtual_overseer.send(FromOrchestra::Communication { msg }).await; - let candidates = rx.await.unwrap(); + let mut candidates = rx.await.unwrap(); + assert_eq!(candidates.len(), 1); + let candidates = candidates.remove(&test_state.chain_ids[0]).unwrap(); assert_eq!(1, candidates.len()); assert_eq!(candidates[0].validity_votes().len(), 3); @@ -1565,7 +1894,11 @@ fn backing_works_after_failed_validation() { // and check that it is still alive. 
let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( - vec![(candidate.hash(), test_state.relay_parent)], + std::iter::once(( + test_state.chain_ids[0], + vec![(candidate.hash(), test_state.relay_parent)], + )) + .collect(), tx, ); diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index 6ecfffd7249b..0663e0f1b699 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -10,7 +10,7 @@ description = "Bitfield signing subsystem for the Polkadot node" workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml index 15fc8c940d33..0cf4707aad29 100644 --- a/polkadot/node/core/candidate-validation/Cargo.toml +++ b/polkadot/node/core/candidate-validation/Cargo.toml @@ -10,8 +10,8 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.74" -futures = "0.3.21" +async-trait = "0.1.79" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } @@ -31,7 +31,7 @@ polkadot-node-core-pvf = { path = "../pvf" } [dev-dependencies] sp-keyring = { path = "../../../../substrate/primitives/keyring" } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } assert_matches = "1.4.0" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } sp-core = { path = "../../../../substrate/primitives/core" } diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 8237137fdca0..ec24434db24c 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -617,7 +617,7 @@ async fn validate_candidate_exhaustive( Err(e) => { gum::info!(target: LOG_TARGET, ?para_id, err=?e, "Invalid candidate (validation code)"); - // Code already passed pre-checking, if decompression fails now this most likley means + // Code already passed pre-checking, if decompression fails now this most likely means // some local corruption happened. 
return Err(ValidationFailed("Code decompression failed".to_string())) }, diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index 9aa017ecba3d..f4d02d3f47b2 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -10,7 +10,7 @@ description = "The Chain API subsystem provides access to chain related utility workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } polkadot-node-metrics = { path = "../../metrics" } polkadot-node-subsystem = { path = "../../subsystem" } @@ -19,7 +19,7 @@ sc-client-api = { path = "../../../../substrate/client/api" } sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } [dev-dependencies] -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } maplit = "1.0.2" parity-scale-codec = "3.6.1" polkadot-node-primitives = { path = "../../primitives" } diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml index 96fd42785cdd..318f27a43086 100644 --- a/polkadot/node/core/chain-selection/Cargo.toml +++ b/polkadot/node/core/chain-selection/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } diff --git a/polkadot/node/core/chain-selection/src/lib.rs b/polkadot/node/core/chain-selection/src/lib.rs index aa5bb9548ad2..6f864fefb611 100644 --- a/polkadot/node/core/chain-selection/src/lib.rs +++ b/polkadot/node/core/chain-selection/src/lib.rs @@ -51,7 +51,7 @@ type Timestamp = u64; // If a block isn't approved in 120 seconds, nodes will abandon it // and begin building on another chain. const STAGNANT_TIMEOUT: Timestamp = 120; -// Delay prunning of the stagnant keys in prune only mode by 25 hours to avoid interception with the +// Delay pruning of the stagnant keys in prune only mode by 25 hours to avoid interception with the // finality const STAGNANT_PRUNE_DELAY: Timestamp = 25 * 60 * 60; // Maximum number of stagnant entries cleaned during one `STAGNANT_TIMEOUT` iteration @@ -237,7 +237,7 @@ impl Clock for SystemClock { // // The exact time that a block becomes stagnant in the local node is always expected // to differ from other nodes due to network asynchrony and delays in block propagation. - // Non-monotonicity exarcerbates that somewhat, but not meaningfully. + // Non-monotonicity exacerbates that somewhat, but not meaningfully. match SystemTime::now().duration_since(UNIX_EPOCH) { Ok(d) => d.as_secs(), diff --git a/polkadot/node/core/chain-selection/src/tests.rs b/polkadot/node/core/chain-selection/src/tests.rs index cf021c0efeb0..bc998f268a0d 100644 --- a/polkadot/node/core/chain-selection/src/tests.rs +++ b/polkadot/node/core/chain-selection/src/tests.rs @@ -406,7 +406,7 @@ async fn import_chains_into_empty( // some pre-blocks may need to be supplied to answer ancestry requests // that gather batches beyond the beginning of the new chain. // pre-blocks are those already known by the subsystem, however, -// the subsystem has no way of knowin that until requesting ancestry. +// the subsystem has no way of knowing that until requesting ancestry. 
 async fn import_all_blocks_into(
 	virtual_overseer: &mut VirtualOverseer,
 	backend: &TestBackend,
@@ -1300,7 +1300,7 @@ fn finalize_erases_unviable_from_one_but_not_all_reverts() {
 	// F <- A1 <- A2 <- A3
 	//
 	// A3 reverts A2 and A1.
-	// Finalize A1. A2 is stil unviable.
+	// Finalize A1. A2 is still unviable.
 
 	let (a3_hash, chain_a) =
 		construct_chain_on_base(vec![1, 2, 3], finalized_number, finalized_hash, |h| {
diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml
index 1fff0a771706..cd3238449bea 100644
--- a/polkadot/node/core/dispute-coordinator/Cargo.toml
+++ b/polkadot/node/core/dispute-coordinator/Cargo.toml
@@ -10,7 +10,7 @@ license.workspace = true
 workspace = true
 
 [dependencies]
-futures = "0.3.21"
+futures = "0.3.30"
 gum = { package = "tracing-gum", path = "../../gum" }
 parity-scale-codec = "3.6.1"
 kvdb = "0.13.0"
diff --git a/polkadot/node/core/dispute-coordinator/src/db/v1.rs b/polkadot/node/core/dispute-coordinator/src/db/v1.rs
index f0f17d2325d6..4950765cf510 100644
--- a/polkadot/node/core/dispute-coordinator/src/db/v1.rs
+++ b/polkadot/node/core/dispute-coordinator/src/db/v1.rs
@@ -341,7 +341,7 @@ pub(crate) fn note_earliest_session(
 	let lower_bound = (new_earliest_session, CandidateHash(Hash::repeat_byte(0x00)));
 
 	let new_recent_disputes = recent_disputes.split_off(&lower_bound);
-	// Any remanining disputes are considered ancient and must be pruned.
+	// Any remaining disputes are considered ancient and must be pruned.
 	let pruned_disputes = recent_disputes;
 
 	if pruned_disputes.len() != 0 {
diff --git a/polkadot/node/core/dispute-coordinator/src/lib.rs b/polkadot/node/core/dispute-coordinator/src/lib.rs
index 4b511e7430af..daa384b36ffb 100644
--- a/polkadot/node/core/dispute-coordinator/src/lib.rs
+++ b/polkadot/node/core/dispute-coordinator/src/lib.rs
@@ -462,7 +462,7 @@ async fn wait_for_first_leaf<Context>(ctx: &mut Context) -> Result<Option<ActivatedLeaf>> {
 					// Rare case where same candidate was present on multiple heights, but all are
 					// pruned at the same time. This candidate was already pruned in the previous
-					// occurence so it is skipped now.
+					// occurrence so it is skipped now.
 				},
 				Entry::Occupied(mut e) => {
 					let mut blocks_including = std::mem::take(e.get_mut());
diff --git a/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs b/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs
index 748f9a16f493..726dda596d7b 100644
--- a/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs
+++ b/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs
@@ -542,8 +542,8 @@ fn scraper_handles_backed_but_not_included_candidate() {
 }
 
 #[test]
-fn scraper_handles_the_same_candidate_incuded_in_two_different_block_heights() {
-	// Same candidate will be inclued in these two leaves
+fn scraper_handles_the_same_candidate_included_in_two_different_block_heights() {
+	// Same candidate will be included in these two leaves
 	let test_targets = vec![2, 3];
 	// How many blocks should we skip before sending a leaf update.
diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs index 0360e357bee4..13cf2df88223 100644 --- a/polkadot/node/core/dispute-coordinator/src/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/tests.rs @@ -61,9 +61,9 @@ use polkadot_node_subsystem_test_helpers::{ make_buffered_subsystem_context, mock::new_leaf, TestSubsystemContextHandle, }; use polkadot_primitives::{ - vstaging::NodeFeatures, ApprovalVote, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CandidateReceipt, CoreIndex, DisputeStatement, ExecutorParams, GroupIndex, Hash, - HeadData, Header, IndexedVec, MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, + ApprovalVote, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + CandidateReceipt, CoreIndex, DisputeStatement, ExecutorParams, GroupIndex, Hash, HeadData, + Header, IndexedVec, MultiDisputeStatementSet, NodeFeatures, ScrapedOnChainVotes, SessionIndex, SessionInfo, SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, }; @@ -2226,7 +2226,7 @@ fn resume_dispute_without_local_statement() { test_state }) }) - // Alice should send a DisputeParticiationMessage::Participate on restart since she has no + // Alice should send a DisputeParticipationMessage::Participate on restart since she has no // local statement for the active dispute. .resume(|mut test_state, mut virtual_overseer| { Box::pin(async move { @@ -2390,7 +2390,7 @@ fn resume_dispute_with_local_statement() { test_state }) }) - // Alice should not send a DisputeParticiationMessage::Participate on restart since she has a + // Alice should not send a DisputeParticipationMessage::Participate on restart since she has a // local statement for the active dispute, instead she should try to (re-)send her vote. .resume(|mut test_state, mut virtual_overseer| { let candidate_receipt = make_valid_candidate_receipt(); @@ -2495,7 +2495,7 @@ fn resume_dispute_without_local_statement_or_local_key() { test_state }) }) - // Two should not send a DisputeParticiationMessage::Participate on restart since she is no + // Two should not send a DisputeParticipationMessage::Participate on restart since she is no // validator in that dispute. .resume(|mut test_state, mut virtual_overseer| { Box::pin(async move { diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index 24da4dc1e316..4f6090f90e95 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -10,11 +10,11 @@ description = "Parachains inherent data provider for Polkadot node" workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } thiserror = { workspace = true } -async-trait = "0.1.74" +async-trait = "0.1.79" polkadot-node-subsystem = { path = "../../subsystem" } polkadot-overseer = { path = "../../overseer" } polkadot-primitives = { path = "../../../primitives" } diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index f66a66e859ec..ab3cef99e54f 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -10,7 +10,7 @@ description = "The Prospective Parachains subsystem. 
Tracks and handles prospect
 
 workspace = true
 
 [dependencies]
-futures = "0.3.19"
+futures = "0.3.30"
 gum = { package = "tracing-gum", path = "../../gum" }
 parity-scale-codec = "3.6.4"
 thiserror = { workspace = true }
diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml
index 2a09e2b5b2cc..ec1a4abb3ece 100644
--- a/polkadot/node/core/provisioner/Cargo.toml
+++ b/polkadot/node/core/provisioner/Cargo.toml
@@ -11,7 +11,7 @@ workspace = true
 
 [dependencies]
 bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
-futures = "0.3.21"
+futures = "0.3.30"
 gum = { package = "tracing-gum", path = "../../gum" }
 thiserror = { workspace = true }
 polkadot-primitives = { path = "../../../primitives" }
diff --git a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs
index cb55ce39bc89..d7a5a8113369 100644
--- a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs
+++ b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs
@@ -52,7 +52,7 @@ pub const MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME: usize = 200;
 /// `dispute-coordinator`.
 ///
 /// This value should be less than `MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME`. Increase it in case
-/// `provisioner` sends too many `QueryCandidateVotes` messages to `dispite-coordinator`.
+/// `provisioner` sends too many `QueryCandidateVotes` messages to `dispute-coordinator`.
 #[cfg(not(test))]
 const VOTES_SELECTION_BATCH_SIZE: usize = 1_100;
 #[cfg(test)]
diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs
index c9ed873d3c25..5cfcb96dc2bc 100644
--- a/polkadot/node/core/provisioner/src/lib.rs
+++ b/polkadot/node/core/provisioner/src/lib.rs
@@ -42,11 +42,11 @@ use polkadot_node_subsystem_util::{
 	TimeoutExt,
 };
 use polkadot_primitives::{
-	vstaging::{node_features::FeatureIndex, NodeFeatures},
-	BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, CoreState, Hash,
-	Id as ParaId, OccupiedCoreAssumption, SessionIndex, SignedAvailabilityBitfield, ValidatorIndex,
+	node_features::FeatureIndex, BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt,
+	CoreIndex, CoreState, Hash, Id as ParaId, NodeFeatures, OccupiedCoreAssumption, SessionIndex,
+	SignedAvailabilityBitfield, ValidatorIndex,
 };
-use std::collections::{BTreeMap, HashMap, HashSet};
+use std::collections::{BTreeMap, HashMap};
 
 mod disputes;
 mod error;
@@ -598,13 +598,11 @@ async fn select_candidate_hashes_from_tracked(
 	candidates: &[CandidateReceipt],
 	relay_parent: Hash,
 	sender: &mut impl overseer::ProvisionerSenderTrait,
-) -> Result<Vec<(CandidateHash, Hash)>, Error> {
+) -> Result<HashMap<ParaId, Vec<(CandidateHash, Hash)>>, Error> {
 	let block_number = get_block_number_under_construction(relay_parent, sender).await?;
 
 	let mut selected_candidates =
-		Vec::with_capacity(candidates.len().min(availability_cores.len()));
-	let mut selected_parachains =
-		HashSet::with_capacity(candidates.len().min(availability_cores.len()));
+		HashMap::with_capacity(candidates.len().min(availability_cores.len()));
 
 	gum::debug!(
 		target: LOG_TARGET,
@@ -638,7 +636,7 @@ async fn select_candidate_hashes_from_tracked(
 			CoreState::Free => continue,
 		};
 
-		if selected_parachains.contains(&scheduled_core.para_id) {
+		if selected_candidates.contains_key(&scheduled_core.para_id) {
 			// We already picked a candidate for this parachain. Elastic scaling only works with
 			// prospective parachains mode.
 			continue
@@ -677,8 +675,10 @@ async fn select_candidate_hashes_from_tracked(
 			"Selected candidate receipt",
 		);
 
-		selected_parachains.insert(candidate.descriptor.para_id);
-		selected_candidates.push((candidate_hash, candidate.descriptor.relay_parent));
+		selected_candidates.insert(
+			candidate.descriptor.para_id,
+			vec![(candidate_hash, candidate.descriptor.relay_parent)],
+		);
 	}
 }
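Before requesting candidates, the `request_backable_candidates` hunk below first tallies how many cores each para has scheduled (or is about to free up), so a para with several cores can receive several backable candidates. A self-contained sketch of that tally, with plain `u32` standing in for `ParaId`:

```rust
use std::collections::BTreeMap;

fn main() {
    // Paras assigned to the scheduled cores in this example (para 2 has three
    // cores, para 3 has two), standing in for the `CoreState::Scheduled` entries.
    let scheduled_paras: [u32; 5] = [2, 2, 2, 3, 3];

    // Same counting pattern as `scheduled_cores_per_para` in the hunk below.
    let mut scheduled_cores_per_para: BTreeMap<u32, usize> = BTreeMap::new();
    for para_id in scheduled_paras {
        *scheduled_cores_per_para.entry(para_id).or_insert(0) += 1;
    }

    assert_eq!(scheduled_cores_per_para[&2], 3);
    assert_eq!(scheduled_cores_per_para[&3], 2);
}
```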
@@ -695,12 +695,12 @@ async fn request_backable_candidates(
 	bitfields: &[SignedAvailabilityBitfield],
 	relay_parent: Hash,
 	sender: &mut impl overseer::ProvisionerSenderTrait,
-) -> Result<Vec<(CandidateHash, Hash)>, Error> {
+) -> Result<HashMap<ParaId, Vec<(CandidateHash, Hash)>>, Error> {
 	let block_number = get_block_number_under_construction(relay_parent, sender).await?;
 
 	// Record how many cores are scheduled for each paraid. Use a BTreeMap because
 	// we'll need to iterate through them.
-	let mut scheduled_cores: BTreeMap<ParaId, usize> = BTreeMap::new();
+	let mut scheduled_cores_per_para: BTreeMap<ParaId, usize> = BTreeMap::new();
 	// The on-chain ancestors of a para present in availability-cores.
 	let mut ancestors: HashMap<ParaId, Ancestors> =
 		HashMap::with_capacity(availability_cores.len());
@@ -709,7 +709,7 @@ async fn request_backable_candidates(
 		let core_idx = CoreIndex(core_idx as u32);
 		match core {
 			CoreState::Scheduled(scheduled_core) => {
-				*scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1;
+				*scheduled_cores_per_para.entry(scheduled_core.para_id).or_insert(0) += 1;
 			},
 			CoreState::Occupied(occupied_core) => {
 				let is_available = bitfields_indicate_availability(
@@ -726,14 +726,14 @@ async fn request_backable_candidates(
 
 					if let Some(ref scheduled_core) = occupied_core.next_up_on_available {
 						// Request a new backable candidate for the newly scheduled para id.
-						*scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1;
+						*scheduled_cores_per_para.entry(scheduled_core.para_id).or_insert(0) += 1;
 					}
 				} else if occupied_core.time_out_at <= block_number {
 					// Timed out before being available.
 					if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out {
 						// Candidate's availability timed out, practically same as scheduled.
-						*scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1;
+						*scheduled_cores_per_para.entry(scheduled_core.para_id).or_insert(0) += 1;
 					}
 				} else {
 					// Not timed out and not available.
@@ -747,10 +747,10 @@ async fn request_backable_candidates(
 		};
 	}
 
-	let mut selected_candidates: Vec<(CandidateHash, Hash)> =
-		Vec::with_capacity(availability_cores.len());
+	let mut selected_candidates: HashMap<ParaId, Vec<(CandidateHash, Hash)>> =
+		HashMap::with_capacity(scheduled_cores_per_para.len());
 
-	for (para_id, core_count) in scheduled_cores {
+	for (para_id, core_count) in scheduled_cores_per_para {
 		let para_ancestors = ancestors.remove(&para_id).unwrap_or_default();
 
 		// If elastic scaling MVP is disabled, only allow one candidate per parachain.
@@ -777,7 +777,7 @@ async fn request_backable_candidates(
 			continue
 		}
 
-		selected_candidates.extend(response.into_iter().take(core_count));
+		selected_candidates.insert(para_id, response);
 	}
 
 	Ok(selected_candidates)
@@ -826,33 +826,38 @@ async fn select_candidates(
 		selected_candidates.clone(),
 		tx,
 	));
-	let mut candidates = rx.await.map_err(|err| Error::CanceledBackedCandidates(err))?;
+	let candidates = rx.await.map_err(|err| Error::CanceledBackedCandidates(err))?;
 	gum::trace!(target: LOG_TARGET, leaf_hash=?relay_parent,
 		"Got {} backed candidates", candidates.len());
 
 	// keep only one candidate with validation code.
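The hunk continues below: the old `retain` in `select_candidates` becomes an explicit merge over the per-para map, admitting at most one candidate that carries a new validation code. A standalone sketch of that rule (simplified types; `has_code_upgrade` stands in for `commitments.new_validation_code.is_some()`):

```rust
use std::collections::HashMap;

#[derive(Clone, Debug)]
struct Candidate {
    para_id: u32,           // placeholder for ParaId
    has_code_upgrade: bool, // stands in for new_validation_code.is_some()
}

// Mirrors the merge loop below: flatten per-para candidate chains into one Vec.
// Once one code upgrade is taken, a further upgrading candidate cuts off the
// rest of its para's chain (the inner `break`).
fn merge(per_para: HashMap<u32, Vec<Candidate>>) -> Vec<Candidate> {
    let mut with_validation_code = false;
    let mut merged = Vec::new();
    for (_para, chain) in per_para {
        for candidate in chain {
            if candidate.has_code_upgrade {
                if with_validation_code {
                    break
                }
                with_validation_code = true;
            }
            merged.push(candidate);
        }
    }
    merged
}

fn main() {
    let mut per_para = HashMap::new();
    per_para.insert(1, vec![Candidate { para_id: 1, has_code_upgrade: true }]);
    per_para.insert(2, vec![Candidate { para_id: 2, has_code_upgrade: true }]);

    // Exactly one upgrade survives; which one depends on HashMap iteration
    // order, which is why the tests below accept several possible outcomes.
    let merged = merge(per_para);
    assert_eq!(merged.len(), 1);
    assert!(matches!(merged[0].para_id, 1 | 2));
}
```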
 	let mut with_validation_code = false;
-	candidates.retain(|c| {
-		if c.candidate().commitments.new_validation_code.is_some() {
-			if with_validation_code {
-				return false
+	// merge the candidates into a common collection, preserving the order
+	let mut merged_candidates = Vec::with_capacity(availability_cores.len());
+
+	for para_candidates in candidates.into_values() {
+		for candidate in para_candidates {
+			if candidate.candidate().commitments.new_validation_code.is_some() {
+				if with_validation_code {
+					break
+				} else {
+					with_validation_code = true;
+				}
 			}
-			with_validation_code = true;
+			merged_candidates.push(candidate);
 		}
-
-		true
-	});
+	}
 
 	gum::debug!(
 		target: LOG_TARGET,
-		n_candidates = candidates.len(),
+		n_candidates = merged_candidates.len(),
 		n_cores = availability_cores.len(),
 		?relay_parent,
 		"Selected backed candidates",
 	);
 
-	Ok(candidates)
+	Ok(merged_candidates)
 }
 
 /// Produces a block number 1 higher than that of the relay parent
diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs
index bdb4f85f4009..823b1d86e461 100644
--- a/polkadot/node/core/provisioner/src/tests.rs
+++ b/polkadot/node/core/provisioner/src/tests.rs
@@ -258,6 +258,8 @@ mod select_candidates {
 		BlockNumber, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData,
 	};
 	use rstest::rstest;
+	use std::ops::Not;
+	use CoreState::{Free, Scheduled};
 
 	const BLOCK_UNDER_PRODUCTION: BlockNumber = 128;
 
@@ -323,9 +325,6 @@
 	// 11: Occupied(next_up_on_available and available, but different successor para_id)
 	// ]
 	fn mock_availability_cores_one_per_para() -> Vec<CoreState> {
-		use std::ops::Not;
-		use CoreState::{Free, Scheduled};
-
 		vec![
 			// 0: Free,
 			Free,
@@ -389,9 +388,6 @@
 	// For test purposes with multiple possible cores assigned to a para, we always return this set
 	// of availability cores:
 	fn mock_availability_cores_multiple_per_para() -> Vec<CoreState> {
-		use std::ops::Not;
-		use CoreState::{Free, Scheduled};
-
 		vec![
 			// 0: Free,
 			Free,
@@ -562,7 +558,10 @@
 		use ChainApiMessage::BlockNumber;
 		use RuntimeApiMessage::Request;
 
-		let mut backed_iter = expected.clone().into_iter();
+		let mut backed = expected.clone().into_iter().fold(HashMap::new(), |mut acc, candidate| {
+			acc.entry(candidate.descriptor().para_id).or_insert(vec![]).push(candidate);
+			acc
+		});
 
 		expected.sort_by_key(|c| c.candidate().descriptor.para_id);
 		let mut candidates_iter = expected
@@ -583,11 +582,30 @@
 				hashes,
 				sender,
 			)) => {
-				let response: Vec<BackedCandidate> =
-					backed_iter.by_ref().take(hashes.len()).collect();
-				let expected_hashes: Vec<(CandidateHash, Hash)> = response
+				let mut response: HashMap<ParaId, Vec<BackedCandidate>> = HashMap::new();
+				for (para_id, requested_candidates) in hashes.clone() {
+					response.insert(
+						para_id,
+						backed
+							.get_mut(&para_id)
+							.unwrap()
+							.drain(0..requested_candidates.len())
+							.collect(),
+					);
+				}
+				let expected_hashes: HashMap<ParaId, Vec<(CandidateHash, Hash)>> = response
 					.iter()
-					.map(|candidate| (candidate.hash(), candidate.descriptor().relay_parent))
+					.map(|(para_id, candidates)| {
+						(
+							*para_id,
+							candidates
+								.iter()
+								.map(|candidate| {
+									(candidate.hash(), candidate.descriptor().relay_parent)
+								})
+								.collect(),
+						)
+					})
 					.collect();
 
 				assert_eq!(expected_hashes, hashes);
@@ -768,7 +786,7 @@
 	#[rstest]
 	#[case(ProspectiveParachainsMode::Disabled)]
 	#[case(ProspectiveParachainsMode::Enabled {max_candidate_depth: 0, allowed_ancestry_len: 0})]
-	fn selects_max_one_code_upgrade(
+	fn selects_max_one_code_upgrade_one_core_per_para(
 		#[case] prospective_parachains_mode: ProspectiveParachainsMode,
 	) {
 		let mock_cores = mock_availability_cores_one_per_para();
@@ -780,7 +798,12 @@
 		let cores = [1, 4, 7, 8, 10, 12];
 		let cores_with_code = [1, 4, 8];
 
-		let expected_cores = [1, 7, 10, 12];
+		// We can't be sure which one code upgrade the provisioner will pick. We can only assert
+		// that it only picks one. These are the possible cores for which the provisioner will
+		// supply candidates.
+		// There are multiple possibilities depending on which code upgrade it
+		// chooses.
+		let possible_expected_cores = [[1, 7, 10, 12], [4, 7, 10, 12], [7, 8, 10, 12]];
 
 		let committed_receipts: Vec<_> = (0..=mock_cores.len())
 			.map(|i| {
@@ -820,8 +843,10 @@
 		// Then, some of them get filtered due to new validation code rule.
 		let expected_backed: Vec<_> =
 			cores.iter().map(|&idx| backed_candidates[idx].clone()).collect();
-		let expected_backed_filtered: Vec<_> =
-			expected_cores.iter().map(|&idx| candidates[idx].clone()).collect();
+		let expected_backed_filtered: Vec<Vec<CandidateReceipt>> = possible_expected_cores
+			.iter()
+			.map(|indices| indices.iter().map(|&idx| candidates[idx].clone()).collect())
+			.collect();
 
 		let mock_cores_clone = mock_cores.clone();
 
@@ -850,13 +875,120 @@
 			assert_eq!(result.len(), 4);
 
-			result.into_iter().for_each(|c| {
-				assert!(
-					expected_backed_filtered.iter().any(|c2| c.candidate().corresponds_to(c2)),
-					"Failed to find candidate: {:?}",
-					c,
-				)
-			});
+			assert!(expected_backed_filtered.iter().any(|expected_backed_filtered| {
+				result.clone().into_iter().all(|c| {
+					expected_backed_filtered.iter().any(|c2| c.candidate().corresponds_to(c2))
+				})
+			}));
 			},
 		)
 	}
+
+	#[test]
+	fn selects_max_one_code_upgrade_multiple_cores_per_para() {
+		let prospective_parachains_mode =
+			ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 };
+		let mock_cores = vec![
+			// 0: Scheduled(default),
+			Scheduled(scheduled_core(1)),
+			// 1: Scheduled(default),
+			Scheduled(scheduled_core(2)),
+			// 2: Scheduled(default),
+			Scheduled(scheduled_core(2)),
+			// 3: Scheduled(default),
+			Scheduled(scheduled_core(2)),
+			// 4: Scheduled(default),
+			Scheduled(scheduled_core(3)),
+			// 5: Scheduled(default),
+			Scheduled(scheduled_core(3)),
+			// 6: Scheduled(default),
+			Scheduled(scheduled_core(3)),
+		];
+
+		let empty_hash = PersistedValidationData::<Hash, BlockNumber>::default().hash();
+		let cores_with_code = [0, 2, 4, 5];
+
+		// We can't be sure which one code upgrade the provisioner will pick. We can only assert
+		// that it only picks one.
+		// These are the possible cores for which the provisioner will
+		// supply candidates. There are multiple possibilities depending on which code upgrade it
+		// chooses.
+		let possible_expected_cores = [vec![0, 1], vec![1, 2, 3], vec![4, 1]];
+
+		let committed_receipts: Vec<_> = (0..mock_cores.len())
+			.map(|i| {
+				let mut descriptor = dummy_candidate_descriptor(dummy_hash());
+				descriptor.para_id = mock_cores[i].para_id().unwrap();
+				descriptor.persisted_validation_data_hash = empty_hash;
+				descriptor.pov_hash = Hash::from_low_u64_be(i as u64);
+				CommittedCandidateReceipt {
+					descriptor,
+					commitments: CandidateCommitments {
+						new_validation_code: if cores_with_code.contains(&i) {
+							Some(vec![].into())
+						} else {
+							None
+						},
+						..Default::default()
+					},
+				}
+			})
+			.collect();
+
+		// Input to select_candidates
+		let candidates: Vec<_> = committed_receipts.iter().map(|r| r.to_plain()).collect();
+		// Build possible outputs from select_candidates
+		let backed_candidates: Vec<_> = committed_receipts
+			.iter()
+			.map(|committed_receipt| {
+				BackedCandidate::new(
+					committed_receipt.clone(),
+					Vec::new(),
+					default_bitvec(MOCK_GROUP_SIZE),
+					None,
+				)
+			})
+			.collect();
+
+		// First, provisioner will request backable candidates for each scheduled core.
+		// Then, some of them get filtered due to new validation code rule.
+		let expected_backed: Vec<_> =
+			(0..mock_cores.len()).map(|idx| backed_candidates[idx].clone()).collect();
+		let expected_backed_filtered: Vec<Vec<CandidateReceipt>> = possible_expected_cores
+			.iter()
+			.map(|indices| indices.iter().map(|&idx| candidates[idx].clone()).collect())
+			.collect();
+
+		let mock_cores_clone = mock_cores.clone();
+
+		test_harness(
+			|r| {
+				mock_overseer(
+					r,
+					mock_cores_clone,
+					expected_backed,
+					HashMap::new(),
+					prospective_parachains_mode,
+				)
+			},
+			|mut tx: TestSubsystemSender| async move {
+				let result = select_candidates(
+					&mock_cores,
+					&[],
+					&candidates,
+					prospective_parachains_mode,
+					true,
+					Default::default(),
+					&mut tx,
+				)
+				.await
+				.unwrap();
+
+				assert!(expected_backed_filtered.iter().any(|expected_backed_filtered| {
+					result.clone().into_iter().all(|c| {
+						expected_backed_filtered.iter().any(|c2| c.candidate().corresponds_to(c2))
+					}) && (expected_backed_filtered.len() == result.len())
+				}));
 			},
 		)
 	}
diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml
index f4f954e316c0..91b12b868097 100644
--- a/polkadot/node/core/pvf-checker/Cargo.toml
+++ b/polkadot/node/core/pvf-checker/Cargo.toml
@@ -10,7 +10,7 @@ license.workspace = true
 workspace = true
 
 [dependencies]
-futures = "0.3.21"
+futures = "0.3.30"
 thiserror = { workspace = true }
 gum = { package = "tracing-gum", path = "../../gum" }
 
diff --git a/polkadot/node/core/pvf-checker/src/tests.rs b/polkadot/node/core/pvf-checker/src/tests.rs
index b0401ecdc3bd..b2365fe53e52 100644
--- a/polkadot/node/core/pvf-checker/src/tests.rs
+++ b/polkadot/node/core/pvf-checker/src/tests.rs
@@ -39,8 +39,8 @@ use std::{collections::HashMap, sync::Arc, time::Duration};
 
 type VirtualOverseer = TestSubsystemContextHandle<PvfCheckerMessage>;
 
-fn dummy_validation_code_hash(descriminator: u8) -> ValidationCodeHash {
-	ValidationCode(vec![descriminator]).hash()
+fn dummy_validation_code_hash(discriminator: u8) -> ValidationCodeHash {
+	ValidationCode(vec![discriminator]).hash()
 }
 
 struct StartsNewSession {
@@ -511,7 +511,7 @@ fn reactivating_pvf_leads_to_second_check() {
 		.reply(PreCheckOutcome::Valid);
 	test_state.expect_submit_vote(&mut handle).await.reply_ok();
 
-	// Now activate a descdedant leaf, where the PVF is not present.
+	// Now activate a descendant leaf, where the PVF is not present.
 	test_state
 		.active_leaves_update(
 			&mut handle,
diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml
index 9ed64b88ffde..a0233d6b7517 100644
--- a/polkadot/node/core/pvf/Cargo.toml
+++ b/polkadot/node/core/pvf/Cargo.toml
@@ -14,7 +14,7 @@ always-assert = "0.1"
 array-bytes = "6.1"
 blake3 = "1.5"
 cfg-if = "1.0"
-futures = "0.3.21"
+futures = "0.3.30"
 futures-timer = "3.0.2"
 gum = { package = "tracing-gum", path = "../../gum" }
 is_executable = "1.0.1"
@@ -50,7 +50,7 @@ hex-literal = "0.4.1"
 polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] }
 # For benches and integration tests, depend on ourselves with the test-utils
 # feature.
-polkadot-node-core-pvf = { path = ".", features = ["test-utils"] }
+polkadot-node-core-pvf = { path = "", features = ["test-utils"] }
 rococo-runtime = { path = "../../../runtime/rococo" }
 
 adder = { package = "test-parachain-adder", path = "../../../parachain/test-parachains/adder" }
diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml
index 56bad9792fa0..f3eb9d919aae 100644
--- a/polkadot/node/core/pvf/common/Cargo.toml
+++ b/polkadot/node/core/pvf/common/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
 [dependencies]
 cfg-if = "1.0"
 cpu-time = "1.0.0"
-futures = "0.3.21"
+futures = "0.3.30"
 gum = { package = "tracing-gum", path = "../../../gum" }
 libc = "0.2.152"
 thiserror = { workspace = true }
diff --git a/polkadot/node/core/pvf/common/src/pvf.rs b/polkadot/node/core/pvf/common/src/pvf.rs
index 534905fbe97a..4f1c9a3c1ea5 100644
--- a/polkadot/node/core/pvf/common/src/pvf.rs
+++ b/polkadot/node/core/pvf/common/src/pvf.rs
@@ -84,9 +84,9 @@ impl PvfPrepData {
 	/// Creates a structure for tests.
 	#[cfg(feature = "test-utils")]
 	pub fn from_discriminator_and_timeout(num: u32, timeout: Duration) -> Self {
-		let descriminator_buf = num.to_le_bytes().to_vec();
+		let discriminator_buf = num.to_le_bytes().to_vec();
 		Self::from_code(
-			descriminator_buf,
+			discriminator_buf,
 			ExecutorParams::default(),
 			timeout,
 			PrepareJobKind::Compilation,
diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs
index 8ec46f4b08f1..59d5a7e20a88 100644
--- a/polkadot/node/core/pvf/src/host.rs
+++ b/polkadot/node/core/pvf/src/host.rs
@@ -917,7 +917,7 @@ async fn sweeper_task(mut sweeper_rx: mpsc::Receiver<PathBuf>) {
 				gum::trace!(
 					target: LOG_TARGET,
 					?result,
-					"Sweeped the artifact file {}",
+					"Swept the artifact file {}",
 					condemned.display(),
 				);
 			},
diff --git a/polkadot/node/core/pvf/src/priority.rs b/polkadot/node/core/pvf/src/priority.rs
index d4bd49eaee84..d1ef9c604b11 100644
--- a/polkadot/node/core/pvf/src/priority.rs
+++ b/polkadot/node/core/pvf/src/priority.rs
@@ -29,7 +29,7 @@ pub enum Priority {
 }
 
 impl Priority {
-	/// Returns `true` if `self` is `Crticial`
+	/// Returns `true` if `self` is `Critical`
 	pub fn is_critical(self) -> bool {
 		self == Priority::Critical
 	}
diff --git a/polkadot/node/core/pvf/src/worker_interface.rs b/polkadot/node/core/pvf/src/worker_interface.rs
index ad9f0294c094..93fffc806622 100644
--- a/polkadot/node/core/pvf/src/worker_interface.rs
+++ b/polkadot/node/core/pvf/src/worker_interface.rs
@@ -130,11 +130,11 @@ where
 fn make_tmppath(prefix: &str, dir: &Path) -> PathBuf {
 	use rand::distributions::Alphanumeric;
 
-	const DESCRIMINATOR_LEN: usize = 10;
+	const DISCRIMINATOR_LEN: usize = 10;
 
-	let mut buf = Vec::with_capacity(prefix.len() + DESCRIMINATOR_LEN);
+	let mut buf = Vec::with_capacity(prefix.len() + DISCRIMINATOR_LEN);
 	buf.extend(prefix.as_bytes());
-	buf.extend(rand::thread_rng().sample_iter(&Alphanumeric).take(DESCRIMINATOR_LEN));
+	buf.extend(rand::thread_rng().sample_iter(&Alphanumeric).take(DISCRIMINATOR_LEN));
 
 	let s = std::str::from_utf8(&buf)
 		.expect("the string is collected from a valid utf-8 sequence; qed");
diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs
index cdfbcd8e5785..16ef23c69cad 100644
--- a/polkadot/node/core/pvf/tests/it/main.rs
+++ b/polkadot/node/core/pvf/tests/it/main.rs
@@ -543,9 +543,9 @@ async fn all_security_features_work() {
 // artifacts cache existing.
 #[cfg(all(feature = "ci-only-tests", target_os = "linux"))]
 #[tokio::test]
-async fn nonexistant_cache_dir() {
+async fn nonexistent_cache_dir() {
 	let host = TestHost::new_with_config(|cfg| {
-		cfg.cache_path = cfg.cache_path.join("nonexistant_cache_dir");
+		cfg.cache_path = cfg.cache_path.join("nonexistent_cache_dir");
 	})
 	.await;
 
diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml
index 2de3a6ee325a..91f5c35b2794 100644
--- a/polkadot/node/core/runtime-api/Cargo.toml
+++ b/polkadot/node/core/runtime-api/Cargo.toml
@@ -10,7 +10,7 @@ license.workspace = true
 workspace = true
 
 [dependencies]
-futures = "0.3.21"
+futures = "0.3.30"
 gum = { package = "tracing-gum", path = "../../gum" }
 schnellru = "0.2.1"
 
@@ -25,8 +25,8 @@ polkadot-node-subsystem-types = { path = "../../subsystem-types" }
 sp-api = { path = "../../../../substrate/primitives/api" }
 sp-core = { path = "../../../../substrate/primitives/core" }
 sp-keyring = { path = "../../../../substrate/primitives/keyring" }
-async-trait = "0.1.74"
-futures = { version = "0.3.21", features = ["thread-pool"] }
+async-trait = "0.1.79"
+futures = { version = "0.3.30", features = ["thread-pool"] }
 polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
 polkadot-node-primitives = { path = "../../primitives" }
 test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" }
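The runtime-api diff that follows adds a claim queue cached per relay parent. A sketch of its shape using std types only (integer placeholders for `CoreIndex` and `ParaId`):

```rust
use std::collections::{BTreeMap, VecDeque};

type CoreIndex = u32; // placeholder for polkadot_primitives::CoreIndex
type ParaId = u32;    // placeholder for polkadot_primitives::Id

fn main() {
    // Each core maps to the queue of paras that claim it next, matching the
    // BTreeMap<CoreIndex, VecDeque<ParaId>> value cached below.
    let mut claim_queue: BTreeMap<CoreIndex, VecDeque<ParaId>> = BTreeMap::new();
    claim_queue.entry(0).or_default().extend([1000, 1001]);
    claim_queue.entry(1).or_default().push_back(2000);

    // The para at the front of a core's queue is next in line for that core.
    assert_eq!(claim_queue[&0].front(), Some(&1000));
    assert_eq!(claim_queue[&1].len(), 1);
}
```

```
diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs
index 5eca551db0a6..acdb256ab36c 100644
--- a/polkadot/node/core/runtime-api/src/cache.rs
+++ b/polkadot/node/core/runtime-api/src/cache.rs
@@ -14,17 +14,16 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
```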
-use std::collections::btree_map::BTreeMap;
+use std::collections::{btree_map::BTreeMap, VecDeque};
 
 use schnellru::{ByLength, LruMap};
 use sp_consensus_babe::Epoch;
 
 use polkadot_primitives::{
-	async_backing, slashing,
-	vstaging::{self, ApprovalVotingParams},
-	AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash,
-	CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash,
-	Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption,
+	async_backing, slashing, ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber,
+	CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex,
+	CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId,
+	InboundDownwardMessage, InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption,
 	PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo,
 	ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature,
 };
@@ -68,8 +67,9 @@ pub(crate) struct RequestResultCache {
 	disabled_validators: LruMap<Hash, Vec<ValidatorIndex>>,
 	para_backing_state: LruMap<(Hash, ParaId), Option<async_backing::BackingState>>,
 	async_backing_params: LruMap<Hash, async_backing::AsyncBackingParams>,
-	node_features: LruMap<SessionIndex, vstaging::NodeFeatures>,
+	node_features: LruMap<SessionIndex, NodeFeatures>,
 	approval_voting_params: LruMap<SessionIndex, ApprovalVotingParams>,
+	claim_queue: LruMap<Hash, BTreeMap<CoreIndex, VecDeque<ParaId>>>,
 }
 
 impl Default for RequestResultCache {
@@ -105,6 +105,7 @@ impl Default for RequestResultCache {
 			para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 			async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 			node_features: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
+			claim_queue: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 		}
 	}
 }
@@ -451,17 +452,14 @@ impl RequestResultCache {
 		self.minimum_backing_votes.insert(session_index, minimum_backing_votes);
 	}
 
-	pub(crate) fn node_features(
-		&mut self,
-		session_index: SessionIndex,
-	) -> Option<&vstaging::NodeFeatures> {
+	pub(crate) fn node_features(&mut self, session_index: SessionIndex) -> Option<&NodeFeatures> {
 		self.node_features.get(&session_index).map(|f| &*f)
 	}
 
 	pub(crate) fn cache_node_features(
 		&mut self,
 		session_index: SessionIndex,
-		features: vstaging::NodeFeatures,
+		features: NodeFeatures,
 	) {
 		self.node_features.insert(session_index, features);
 	}
@@ -525,6 +523,21 @@ impl RequestResultCache {
 	) {
 		self.approval_voting_params.insert(session_index, value);
 	}
+
+	pub(crate) fn claim_queue(
+		&mut self,
+		relay_parent: &Hash,
+	) -> Option<&BTreeMap<CoreIndex, VecDeque<ParaId>>> {
+		self.claim_queue.get(relay_parent).map(|v| &*v)
+	}
+
+	pub(crate) fn cache_claim_queue(
+		&mut self,
+		relay_parent: Hash,
+		value: BTreeMap<CoreIndex, VecDeque<ParaId>>,
+	) {
+		self.claim_queue.insert(relay_parent, value);
+	}
 }
 
 pub(crate) enum RequestResult {
@@ -576,5 +589,6 @@ pub(crate) enum RequestResult {
 	DisabledValidators(Hash, Vec<ValidatorIndex>),
 	ParaBackingState(Hash, ParaId, Option<async_backing::BackingState>),
 	AsyncBackingParams(Hash, async_backing::AsyncBackingParams),
-	NodeFeatures(SessionIndex, vstaging::NodeFeatures),
+	NodeFeatures(SessionIndex, NodeFeatures),
+	ClaimQueue(Hash, BTreeMap<CoreIndex, VecDeque<ParaId>>),
 }
diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs
index 3ff1a35d0687..2b7f6fc2d609 100644
--- a/polkadot/node/core/runtime-api/src/lib.rs
+++ b/polkadot/node/core/runtime-api/src/lib.rs
@@ -177,6 +177,9 @@ where
 				self.requests_cache.cache_async_backing_params(relay_parent, params),
 			NodeFeatures(session_index, params) =>
 				self.requests_cache.cache_node_features(session_index, params),
+			ClaimQueue(relay_parent, sender) => {
+				self.requests_cache.cache_claim_queue(relay_parent, sender);
+			},
 		}
 	}
 
@@ -329,6 +332,8 @@ where
 					Some(Request::NodeFeatures(index, sender))
 				}
 			},
+			Request::ClaimQueue(sender) =>
+				query!(claim_queue(), sender).map(|sender| Request::ClaimQueue(sender)),
 		}
 	}
 
@@ -626,5 +631,11 @@ where
 			sender,
 			result = (index)
 		),
+		Request::ClaimQueue(sender) => query!(
+			ClaimQueue,
+			claim_queue(),
+			ver = Request::CLAIM_QUEUE_RUNTIME_REQUIREMENT,
+			sender
+		),
 	}
 }
diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs
index f91723b3d39e..73c661c40762 100644
--- a/polkadot/node/core/runtime-api/src/tests.rs
+++ b/polkadot/node/core/runtime-api/src/tests.rs
@@ -20,18 +20,17 @@ use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfigurati
 use polkadot_node_subsystem::SpawnGlue;
 use polkadot_node_subsystem_test_helpers::make_subsystem_context;
 use polkadot_primitives::{
-	async_backing, slashing,
-	vstaging::{ApprovalVotingParams, NodeFeatures},
-	AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash,
-	CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo,
-	Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption,
+	async_backing, slashing, ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber,
+	CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex,
+	CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Id as ParaId,
+	InboundDownwardMessage, InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption,
 	PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo,
 	Slot, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature,
 };
 use sp_api::ApiError;
 use sp_core::testing::TaskExecutor;
 use std::{
-	collections::{BTreeMap, HashMap},
+	collections::{BTreeMap, HashMap, VecDeque},
 	sync::{Arc, Mutex},
 };
 use test_helpers::{dummy_committed_candidate_receipt, dummy_validation_code};
@@ -286,17 +285,24 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient {
 	async fn disabled_validators(&self, _: Hash) -> Result<Vec<ValidatorIndex>, ApiError> {
 		todo!("Not required for tests")
 	}
+
+	async fn claim_queue(
+		&self,
+		_: Hash,
+	) -> Result<BTreeMap<CoreIndex, VecDeque<ParaId>>, ApiError> {
+		todo!("Not required for tests")
+	}
 }
 
 #[test]
 fn requests_authorities() {
 	let (ctx, mut ctx_handle) = make_subsystem_context(TaskExecutor::new());
-	let substem_client = Arc::new(MockSubsystemClient::default());
+	let subsystem_client = Arc::new(MockSubsystemClient::default());
 	let relay_parent = [1; 32].into();
 	let spawner = sp_core::testing::TaskExecutor::new();
 
 	let subsystem =
-		RuntimeApiSubsystem::new(substem_client.clone(), Metrics(None), SpawnGlue(spawner));
+		RuntimeApiSubsystem::new(subsystem_client.clone(), Metrics(None), SpawnGlue(spawner));
 	let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap());
 	let test_task = async move {
 		let (tx, rx) = oneshot::channel();
@@ -307,7 +313,7 @@ fn requests_authorities() {
 		})
 		.await;
 
-		assert_eq!(rx.await.unwrap().unwrap(), substem_client.authorities);
+		assert_eq!(rx.await.unwrap().unwrap(), subsystem_client.authorities);
 
 		ctx_handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
 	};
@@ -1034,7 +1040,7 @@ fn requests_submit_pvf_check_statement() {
 		let _ = rx.await.unwrap().unwrap();
 
 		assert_eq!(
-			&*subsystem_client.submitted_pvf_check_statement.lock().expect("poisened mutex"),
+			&*subsystem_client.submitted_pvf_check_statement.lock().expect("poisoned mutex"),
&[(stmt.clone(), sig.clone()), (stmt.clone(), sig.clone())] ); diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml index 23ab8f842108..6fa3d41eddb1 100644 --- a/polkadot/node/jaeger/Cargo.toml +++ b/polkadot/node/jaeger/Cargo.toml @@ -18,6 +18,6 @@ polkadot-node-primitives = { path = "../primitives" } sc-network = { path = "../../../substrate/client/network" } sp-core = { path = "../../../substrate/primitives/core" } thiserror = { workspace = true } -tokio = "1.24.2" +tokio = "1.37" log = { workspace = true, default-features = true } parity-scale-codec = { version = "3.6.1", default-features = false } diff --git a/polkadot/node/jaeger/src/spans.rs b/polkadot/node/jaeger/src/spans.rs index 4038d41344f2..4816fccf3b96 100644 --- a/polkadot/node/jaeger/src/spans.rs +++ b/polkadot/node/jaeger/src/spans.rs @@ -70,7 +70,7 @@ //! let root_span = //! jaeger::Span::new(relay_parent, "root_of_aaall_spans"); //! -//! // the prefered way of adding additional delayed information: +//! // the preferred way of adding additional delayed information: //! let span = root_span.child("inner"); //! //! // ... more operations ... diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml index b3eb856f08ef..2f63c2f0938d 100644 --- a/polkadot/node/malus/Cargo.toml +++ b/polkadot/node/malus/Cargo.toml @@ -40,11 +40,11 @@ polkadot-node-primitives = { path = "../primitives" } polkadot-primitives = { path = "../../primitives" } color-eyre = { version = "0.6.1", default-features = false } assert_matches = "1.5" -async-trait = "0.1.74" +async-trait = "0.1.79" sp-keystore = { path = "../../../substrate/primitives/keystore" } sp-core = { path = "../../../substrate/primitives/core" } clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../gum" } erasure = { package = "polkadot-erasure-coding", path = "../../erasure-coding" } @@ -58,7 +58,7 @@ polkadot-node-core-pvf-prepare-worker = { path = "../core/pvf/prepare-worker" } [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } sp-core = { path = "../../../substrate/primitives/core" } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } [build-dependencies] substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } diff --git a/polkadot/node/malus/README.md b/polkadot/node/malus/README.md index e0c7893a7531..25453a1980e4 100644 --- a/polkadot/node/malus/README.md +++ b/polkadot/node/malus/README.md @@ -18,7 +18,7 @@ defined in the [(DSL[(**D**omain **S**pecific **L**anguage)]) doc](https://parit ## Usage -> Assumes you already gained permissiones, ping in element `@javier:matrix.parity.io` to get access. +> Assumes you already gained permissions, ping in element `@javier:matrix.parity.io` to get access. > and you have cloned the [zombienet][zombienet] repo. To launch a test case in the development cluster use (e.g. 
for the ./node/malus/integrationtests/0001-dispute-valid-block.toml): diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml index c567278f70ea..fbf0abf829e1 100644 --- a/polkadot/node/metrics/Cargo.toml +++ b/polkadot/node/metrics/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../gum" } @@ -30,7 +30,7 @@ log = { workspace = true, default-features = true } assert_cmd = "2.0.4" tempfile = "3.2.0" hyper = { version = "0.14.20", default-features = false, features = ["http1", "tcp"] } -tokio = "1.24.2" +tokio = "1.37" polkadot-test-service = { path = "../test/service", features = ["runtime-metrics"] } substrate-test-utils = { path = "../../../substrate/test-utils" } sc-service = { path = "../../../substrate/client/service" } diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml index 2bc09c5f42ac..4c04ad83f84f 100644 --- a/polkadot/node/network/approval-distribution/Cargo.toml +++ b/polkadot/node/network/approval-distribution/Cargo.toml @@ -20,7 +20,7 @@ polkadot-node-jaeger = { path = "../../jaeger" } rand = "0.8" itertools = "0.10.5" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } @@ -37,5 +37,5 @@ schnorrkel = { version = "0.11.4", default-features = false } # rand_core should match schnorrkel rand_core = "0.6.2" rand_chacha = "0.3.1" -env_logger = "0.9.0" +env_logger = "0.11" log = { workspace = true, default-features = true } diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index f4e40270160c..d360a18423e6 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -230,7 +230,7 @@ impl ApprovalEntry { Ok(()) } - // Get the assignment certiticate and claimed candidates. + // Get the assignment certificate and claimed candidates. pub fn assignment(&self) -> (IndirectAssignmentCertV2, CandidateBitfield) { (self.assignment.clone(), self.assignment_claimed_candidates.clone()) } @@ -404,7 +404,7 @@ impl Knowledge { }, }; - // In case of succesful insertion of multiple candidate assignments create additional + // In case of successful insertion of multiple candidate assignments create additional // entries for each assigned candidate. This fakes knowledge of individual assignments, but // we need to share the same `MessageSubject` with the followup approval candidate index. if kind == MessageKind::Assignment && success && message.1.count_ones() > 1 { @@ -1897,10 +1897,10 @@ impl State { _ => break, }; - // Any peer which is in the `known_by` see and we know its peer_id authorithy id + // Any peer which is in the `known_by` see and we know its peer_id authority id // mapping has already been sent all messages it's meant to get for that block and // all in-scope prior blocks. In case, we just learnt about its peer_id - // authorithy-id mapping we have to retry sending the messages that should be sent + // authority-id mapping we have to retry sending the messages that should be sent // to it for all un-finalized blocks. 
if entry.known_by.contains_key(&peer_id) && !retry_known_blocks { break @@ -2199,7 +2199,7 @@ impl State { sanitized_assignments } - // Filter out obviously invalid candidate indicies. + // Filter out obviously invalid candidate indices. async fn sanitize_v1_approvals( &mut self, peer_id: PeerId, @@ -2226,7 +2226,7 @@ impl State { sanitized_approvals } - // Filter out obviously invalid candidate indicies. + // Filter out obviously invalid candidate indices. async fn sanitize_v2_approvals( &mut self, peer_id: PeerId, @@ -2260,7 +2260,7 @@ impl State { // The modifier accepts as inputs the current required-routing state, whether // the message is locally originating, and the validator index of the message issuer. // -// Then, if the topology is known, this progates messages to all peers in the required +// Then, if the topology is known, this propagates messages to all peers in the required // routing set which are aware of the block. Peers which are unaware of the block // will have the message sent when it enters their view in `unify_with_peer`. // @@ -2440,7 +2440,7 @@ impl ApprovalDistribution { gum::trace!(target: LOG_TARGET, "active leaves signal (ignored)"); // the relay chain blocks relevant to the approval subsystems // are those that are available, but not finalized yet - // actived and deactivated heads hence are irrelevant to this subsystem, other than + // activated and deactivated heads hence are irrelevant to this subsystem, other than // for tracing purposes. if let Some(activated) = update.activated { let head = activated.hash; diff --git a/polkadot/node/network/approval-distribution/src/metrics.rs b/polkadot/node/network/approval-distribution/src/metrics.rs index 0642b1b2e0cd..60c7f2f6d3b8 100644 --- a/polkadot/node/network/approval-distribution/src/metrics.rs +++ b/polkadot/node/network/approval-distribution/src/metrics.rs @@ -299,7 +299,7 @@ impl MetricsTrait for Metrics { prometheus::CounterVec::new( prometheus::Opts::new( "polkadot_parachain_assignments_received_result", - "Result of a processed assignement", + "Result of a processed assignment", ), &["status"] )?, diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 6c88dd53ad36..3159fe2ae5e8 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -394,7 +394,7 @@ fn try_import_the_same_assignment() { setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V1).await; - // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // Set up a gossip topology, where a, b, c and d are topology neighbors to the node under // testing. let peers_with_optional_peer_id = peers .iter() @@ -491,7 +491,7 @@ fn try_import_the_same_assignment_v2() { setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V3).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V3).await; - // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // Set up a gossip topology, where a, b, c and d are topology neighbors to the node under // testing. 
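The required-routing comment fixed above describes the propagation rule: when the topology is known, a message goes out immediately only to peers that are both in the required routing set and already have the block in their view; everyone else receives it later through `unify_with_peer`. A minimal sketch of that filter (invented `u64` peer ids, not the real `PeerId` type):

```rust
use std::collections::HashSet;

fn peers_to_send(required_routing: &HashSet<u64>, aware_of_block: &HashSet<u64>) -> Vec<u64> {
    // Only peers in the routing set that already have the block in their view
    // receive the message now; the rest catch up in `unify_with_peer`.
    required_routing.intersection(aware_of_block).copied().collect()
}

fn main() {
    let routing: HashSet<u64> = HashSet::from([1, 2, 3]);
    let aware: HashSet<u64> = HashSet::from([2, 3, 4]);
    let mut now = peers_to_send(&routing, &aware);
    now.sort();
    assert_eq!(now, vec![2, 3]);
}
```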
let peers_with_optional_peer_id = peers .iter() @@ -744,7 +744,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), @@ -850,7 +850,7 @@ fn import_approval_happy_path_v1_v2_peers() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Set up a gossip topology, where a, b, and c are topology neighboors to the node. + // Set up a gossip topology, where a, b, and c are topology neighbors to the node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0, 1], &[2, 4], 3), @@ -972,7 +972,7 @@ fn import_approval_happy_path_v2() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Set up a gossip topology, where a, b, and c are topology neighboors to the node. + // Set up a gossip topology, where a, b, and c are topology neighbors to the node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0, 1], &[2, 4], 3), @@ -1083,7 +1083,7 @@ fn multiple_assignments_covered_with_one_approval_vote() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Set up a gossip topology, where a, b, and c, d are topology neighboors to the node. + // Set up a gossip topology, where a, b, and c, d are topology neighbors to the node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0, 1], &[2, 4], 3), @@ -1273,7 +1273,7 @@ fn unify_with_peer_multiple_assignments_covered_with_one_approval_vote() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Set up a gossip topology, where a, b, and c, d are topology neighboors to the node. + // Set up a gossip topology, where a, b, and c, d are topology neighbors to the node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0, 1], &[2, 4], 3), @@ -1631,7 +1631,7 @@ fn update_peer_view() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), @@ -1758,7 +1758,7 @@ fn update_peer_view() { assert!(state.blocks.get(&hash_c).unwrap().known_by.get(peer).is_none()); } -// Tests that updating the known peer_id for a given authorithy updates the topology +// Tests that updating the known peer_id for a given authority updates the topology // and sends the required messages #[test] fn update_peer_authority_id() { @@ -1770,9 +1770,9 @@ fn update_peer_authority_id() { let neighbour_x_index = 0; let neighbour_y_index = 2; let local_index = 1; - // X neighbour, we simulate that PeerId is not known in the beginining. + // X neighbour, we simulate that PeerId is not known in the beginning. let neighbour_x = peers.get(neighbour_x_index).unwrap().0; - // Y neighbour, we simulate that PeerId is not known in the beginining. + // Y neighbour, we simulate that PeerId is not known in the beginning. 
let neighbour_y = peers.get(neighbour_y_index).unwrap().0; let _state = test_harness(State::default(), |mut virtual_overseer| async move { @@ -1814,7 +1814,7 @@ fn update_peer_authority_id() { }) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. setup_gossip_topology( overseer, make_gossip_topology( @@ -2053,7 +2053,7 @@ fn sends_assignments_even_when_state_is_approved() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), @@ -2125,7 +2125,7 @@ fn sends_assignments_even_when_state_is_approved() { } /// Same as `sends_assignments_even_when_state_is_approved_v2` but with `VRFModuloCompact` -/// assignemnts. +/// assignments. #[test] fn sends_assignments_even_when_state_is_approved_v2() { let peers = make_peers_and_authority_ids(8); @@ -2153,7 +2153,7 @@ fn sends_assignments_even_when_state_is_approved_v2() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), @@ -3509,7 +3509,7 @@ fn import_versioned_approval() { setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V2).await; - // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // Set up a gossip topology, where a, b, c and d are topology neighbors to the node under // testing. 
let peers_with_optional_peer_id = peers .iter() diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 182d92cb1631..b5636203f166 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } parity-scale-codec = { version = "3.6.1", features = ["std"] } polkadot-primitives = { path = "../../../primitives" } @@ -39,9 +39,9 @@ polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } polkadot-subsystem-bench = { path = "../../subsystem-bench" } -[[test]] +[[bench]] name = "availability-distribution-regression-bench" -path = "tests/availability-distribution-regression-bench.rs" +path = "benches/availability-distribution-regression-bench.rs" harness = false required-features = ["subsystem-benchmarks"] diff --git a/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs similarity index 53% rename from polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs rename to polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs index bdab11298d5c..c33674a8f2f9 100644 --- a/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs +++ b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs @@ -24,46 +24,62 @@ //! 
- availability-store
 
 use polkadot_subsystem_bench::{
-	availability::{benchmark_availability_write, prepare_test, TestDataAvailability, TestState},
+	availability::{benchmark_availability_write, prepare_test, TestState},
 	configuration::TestConfiguration,
-	utils::{warm_up_and_benchmark, WarmUpOptions},
+	usage::BenchmarkUsage,
+	utils::save_to_file,
 };
+use std::io::Write;
+
+const BENCH_COUNT: usize = 50;
 
 fn main() -> Result<(), String> {
 	let mut messages = vec![];
 	let mut config = TestConfiguration::default();
-	// A single node effort roughly n_cores * needed_approvals / n_validators = 60 * 30 / 300
-	config.n_cores = 6;
+	// A single node effort roughly
+	config.n_cores = 10;
+	config.n_validators = 500;
 	config.num_blocks = 3;
 	config.generate_pov_sizes();
+	let state = TestState::new(&config);
 
-	let usage = warm_up_and_benchmark(
-		WarmUpOptions::new(&[
-			"availability-distribution",
-			"bitfield-distribution",
-			"availability-store",
-		]),
-		|| {
-			let mut state = TestState::new(&config);
-			let (mut env, _protocol_config) =
-				prepare_test(config.clone(), &mut state, TestDataAvailability::Write, false);
+	println!("Benchmarking...");
+	let usages: Vec<BenchmarkUsage> = (0..BENCH_COUNT)
+		.map(|n| {
+			print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n));
+			std::io::stdout().flush().unwrap();
+			let (mut env, _cfgs) = prepare_test(
+				&state,
+				polkadot_subsystem_bench::availability::TestDataAvailability::Write,
+				false,
+			);
 			env.runtime().block_on(benchmark_availability_write(
 				"data_availability_write",
 				&mut env,
-				state,
+				&state,
 			))
-		},
-	)?;
-	println!("{}", usage);
+		})
+		.collect();
+	println!("\rDone!{}", " ".repeat(BENCH_COUNT));
+
+	let average_usage = BenchmarkUsage::average(&usages);
+	save_to_file(
+		"charts/availability-distribution-regression-bench.json",
+		average_usage.to_chart_json().map_err(|e| e.to_string())?,
+	)
+	.map_err(|e| e.to_string())?;
+	println!("{}", average_usage);
 
-	messages.extend(usage.check_network_usage(&[
-		("Received from peers", 443.333, 0.05),
-		("Sent to peers", 21818.555, 0.05),
+	// We expect no variance for received and sent
+	// but use 0.001 because we operate with floats
+	messages.extend(average_usage.check_network_usage(&[
+		("Received from peers", 433.3, 0.001),
+		("Sent to peers", 18480.0, 0.001),
 	]));
-	messages.extend(usage.check_cpu_usage(&[
-		("availability-distribution", 0.011, 0.05),
-		("bitfield-distribution", 0.029, 0.05),
-		("availability-store", 0.232, 0.05),
+	messages.extend(average_usage.check_cpu_usage(&[
+		("availability-distribution", 0.012, 0.05),
+		("availability-store", 0.153, 0.05),
+		("bitfield-distribution", 0.026, 0.05),
 	]));
 
 	if messages.is_empty() {
diff --git a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
index 4e23030aa499..f99002d4188b 100644
--- a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
+++ b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
@@ -147,9 +147,7 @@ mod tests {
 		AllMessages, AvailabilityDistributionMessage, RuntimeApiMessage, RuntimeApiRequest,
 	};
 	use polkadot_node_subsystem_test_helpers as test_helpers;
-	use polkadot_primitives::{
-		vstaging::NodeFeatures, CandidateHash, ExecutorParams, Hash, ValidatorIndex,
-	};
+	use polkadot_primitives::{CandidateHash, ExecutorParams, Hash, NodeFeatures, ValidatorIndex};
 	use test_helpers::mock::make_ferdie_keystore;
 
 	use super::*;
diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs
a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs index 191ee2acd973..f478defcaa96 100644 --- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -348,7 +348,7 @@ impl RunningTask { async fn do_request( &mut self, validator: &AuthorityDiscoveryId, - nerwork_error_freq: &mut gum::Freq, + network_error_freq: &mut gum::Freq, canceled_freq: &mut gum::Freq, ) -> std::result::Result { gum::trace!( @@ -395,7 +395,7 @@ impl RunningTask { }, Err(RequestError::NetworkError(err)) => { gum::warn_if_frequent!( - freq: nerwork_error_freq, + freq: network_error_freq, max_rate: gum::Times::PerHour(100), target: LOG_TARGET, origin = ?validator, diff --git a/polkadot/node/network/availability-distribution/src/requester/tests.rs b/polkadot/node/network/availability-distribution/src/requester/tests.rs index 2f5d900b037e..0dedd4f091ac 100644 --- a/polkadot/node/network/availability-distribution/src/requester/tests.rs +++ b/polkadot/node/network/availability-distribution/src/requester/tests.rs @@ -25,7 +25,7 @@ use polkadot_node_primitives::{BlockData, ErasureChunk, PoV}; use polkadot_node_subsystem_test_helpers::mock::new_leaf; use polkadot_node_subsystem_util::runtime::RuntimeInfo; use polkadot_primitives::{ - vstaging::NodeFeatures, BlockNumber, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, + BlockNumber, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, NodeFeatures, ScheduledCore, SessionIndex, SessionInfo, }; use sp_core::traits::SpawnNamed; diff --git a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs index 66a8d8fcdcf9..1d814b4fd0ed 100644 --- a/polkadot/node/network/availability-distribution/src/tests/state.rs +++ b/polkadot/node/network/availability-distribution/src/tests/state.rs @@ -47,8 +47,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - vstaging::NodeFeatures, CandidateHash, CoreState, ExecutorParams, GroupIndex, Hash, - Id as ParaId, ScheduledCore, SessionInfo, ValidatorIndex, + CandidateHash, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, NodeFeatures, + ScheduledCore, SessionInfo, ValidatorIndex, }; use test_helpers::mock::{make_ferdie_keystore, new_leaf}; diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 12b6ce7a0571..dd0e0c432345 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -10,13 +10,13 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" -tokio = "1.24.2" +futures = "0.3.30" +tokio = "1.37" schnellru = "0.2.1" rand = "0.8.5" fatality = "0.0.6" thiserror = { workspace = true } -async-trait = "0.1.74" +async-trait = "0.1.79" gum = { package = "tracing-gum", path = "../../gum" } polkadot-erasure-coding = { path = "../../../erasure-coding" } @@ -30,7 +30,7 @@ sc-network = { path = "../../../../substrate/client/network" } [dev-dependencies] assert_matches = "1.4.0" -env_logger = "0.9.0" +env_logger = "0.11" futures-timer = "3.0.2" log = { workspace = true, default-features = true } @@ -43,9 +43,9 @@ polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } 
polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } polkadot-subsystem-bench = { path = "../../subsystem-bench" } -[[test]] +[[bench]] name = "availability-recovery-regression-bench" -path = "tests/availability-recovery-regression-bench.rs" +path = "benches/availability-recovery-regression-bench.rs" harness = false required-features = ["subsystem-benchmarks"] diff --git a/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs similarity index 50% rename from polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs rename to polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs index 42b1787e0450..46a38516898f 100644 --- a/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs +++ b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs @@ -14,9 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. -//! availability-write regression tests +//! availability-read regression tests //! -//! Availability write benchmark based on Kusama parameters and scale. +//! Availability read benchmark based on Kusama parameters and scale. //! //! Subsystems involved: //! - availability-recovery @@ -27,8 +27,12 @@ use polkadot_subsystem_bench::{ TestDataAvailability, TestState, }, configuration::TestConfiguration, - utils::{warm_up_and_benchmark, WarmUpOptions}, + usage::BenchmarkUsage, + utils::save_to_file, }; +use std::io::Write; + +const BENCH_COUNT: usize = 50; fn main() -> Result<(), String> { let mut messages = vec![]; @@ -38,27 +42,39 @@ fn main() -> Result<(), String> { config.num_blocks = 3; config.generate_pov_sizes(); - let usage = warm_up_and_benchmark(WarmUpOptions::new(&["availability-recovery"]), || { - let mut state = TestState::new(&config); - let (mut env, _protocol_config) = prepare_test( - config.clone(), - &mut state, - TestDataAvailability::Read(options.clone()), - false, - ); - env.runtime().block_on(benchmark_availability_read( - "data_availability_read", - &mut env, - state, - )) - })?; - println!("{}", usage); + let state = TestState::new(&config); + + println!("Benchmarking..."); + let usages: Vec<BenchmarkUsage> = (0..BENCH_COUNT) + .map(|n| { + print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n)); + std::io::stdout().flush().unwrap(); + let (mut env, _cfgs) = + prepare_test(&state, TestDataAvailability::Read(options.clone()), false); + env.runtime().block_on(benchmark_availability_read( + "data_availability_read", + &mut env, + &state, + )) + }) + .collect(); + println!("\rDone!{}", " ".repeat(BENCH_COUNT)); + + let average_usage = BenchmarkUsage::average(&usages); + save_to_file( + "charts/availability-recovery-regression-bench.json", + average_usage.to_chart_json().map_err(|e| e.to_string())?, + ) + .map_err(|e| e.to_string())?; + println!("{}", average_usage); - messages.extend(usage.check_network_usage(&[ - ("Received from peers", 307200.000, 0.05), - ("Sent to peers", 1.667, 0.05), + // We expect no variance for received and sent + // but use 0.001 because we operate with floats + messages.extend(average_usage.check_network_usage(&[ + ("Received from peers", 307200.000, 0.001), + ("Sent to peers", 1.667, 0.001), ])); - messages.extend(usage.check_cpu_usage(&[("availability-recovery", 11.500, 0.05)])); +
messages.extend(average_usage.check_cpu_usage(&[("availability-recovery", 11.500, 0.05)])); if messages.is_empty() { Ok(()) diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs index 1c44ddf710b7..91c7588032db 100644 --- a/polkadot/node/network/availability-recovery/src/lib.rs +++ b/polkadot/node/network/availability-recovery/src/lib.rs @@ -407,7 +407,7 @@ async fn handle_recover( ) -> error::Result<()> { let candidate_hash = receipt.hash(); - let span = jaeger::Span::new(candidate_hash, "availbility-recovery") + let span = jaeger::Span::new(candidate_hash, "availability-recovery") .with_stage(jaeger::Stage::AvailabilityRecovery); if let Some(result) = diff --git a/polkadot/node/network/availability-recovery/src/metrics.rs b/polkadot/node/network/availability-recovery/src/metrics.rs index d82a8f9ae5fa..9f4cddc57e43 100644 --- a/polkadot/node/network/availability-recovery/src/metrics.rs +++ b/polkadot/node/network/availability-recovery/src/metrics.rs @@ -31,7 +31,7 @@ struct MetricsInner { chunk_requests_issued: Counter, /// Total number of bytes recovered /// - /// Gets incremented on each succesful recovery + /// Gets incremented on each successful recovery recovered_bytes_total: Counter, /// A counter for finished chunk requests. /// @@ -232,7 +232,7 @@ impl metrics::Metrics for Metrics { )?, full_recoveries_started: prometheus::register( Counter::new( - "polkadot_parachain_availability_recovery_recovieries_started", + "polkadot_parachain_availability_recovery_recoveries_started", "Total number of started recoveries.", )?, registry, diff --git a/polkadot/node/network/bitfield-distribution/Cargo.toml b/polkadot/node/network/bitfield-distribution/Cargo.toml index 0ddb5f643b89..6b5b784b7fd8 100644 --- a/polkadot/node/network/bitfield-distribution/Cargo.toml +++ b/polkadot/node/network/bitfield-distribution/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] always-assert = "0.1" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } @@ -30,6 +30,6 @@ sp-keystore = { path = "../../../../substrate/primitives/keystore" } sp-keyring = { path = "../../../../substrate/primitives/keyring" } maplit = "1.0.2" log = { workspace = true, default-features = true } -env_logger = "0.9.0" +env_logger = "0.11" assert_matches = "1.4.0" rand_chacha = "0.3.1" diff --git a/polkadot/node/network/bitfield-distribution/src/metrics.rs b/polkadot/node/network/bitfield-distribution/src/metrics.rs index 71d8a01300f2..bd956bcbe4a3 100644 --- a/polkadot/node/network/bitfield-distribution/src/metrics.rs +++ b/polkadot/node/network/bitfield-distribution/src/metrics.rs @@ -69,14 +69,14 @@ impl MetricsTrait for Metrics { let metrics = MetricsInner { sent_own_availability_bitfields: prometheus::register( prometheus::Counter::new( - "polkadot_parachain_sent_own_availabilty_bitfields_total", + "polkadot_parachain_sent_own_availability_bitfields_total", "Number of own availability bitfields sent to other peers.", )?, registry, )?, received_availability_bitfields: prometheus::register( prometheus::Counter::new( - "polkadot_parachain_received_availabilty_bitfields_total", + "polkadot_parachain_received_availability_bitfields_total", "Number of valid availability bitfields received from other peers.", )?, registry, diff --git a/polkadot/node/network/bitfield-distribution/src/tests.rs 
b/polkadot/node/network/bitfield-distribution/src/tests.rs index ba2434ea47d6..188b51ebccca 100644 --- a/polkadot/node/network/bitfield-distribution/src/tests.rs +++ b/polkadot/node/network/bitfield-distribution/src/tests.rs @@ -150,7 +150,7 @@ fn receive_invalid_signature() { let signing_context = SigningContext { session_index: 1, parent_hash: hash_a }; - // another validator not part of the validatorset + // another validator not part of the validator set let keystore: KeystorePtr = Arc::new(MemoryKeystore::new()); let malicious = Keystore::sr25519_generate_new(&*keystore, ValidatorId::ID, None) .expect("Malicious key created"); diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index 2e889fc30eb9..9c2423e7e581 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -11,8 +11,8 @@ workspace = true [dependencies] always-assert = "0.1" -async-trait = "0.1.74" -futures = "0.3.21" +async-trait = "0.1.79" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/node/network/bridge/src/lib.rs b/polkadot/node/network/bridge/src/lib.rs index ddce99d5c2a8..0305aaa067cc 100644 --- a/polkadot/node/network/bridge/src/lib.rs +++ b/polkadot/node/network/bridge/src/lib.rs @@ -102,6 +102,30 @@ struct SharedInner { collation_peers: HashMap<PeerId, PeerData>, } +// Counts the number of peers that are connected using `version` +fn count_peers_by_version(peers: &HashMap<PeerId, PeerData>) -> HashMap<ProtocolVersion, usize> { + let mut by_version_count = HashMap::new(); + for peer in peers.values() { + *(by_version_count.entry(peer.version).or_default()) += 1; + } + by_version_count +} + +// Notes the peer count +fn note_peers_count(metrics: &Metrics, shared: &Shared) { + let guard = shared.0.lock(); + let validation_stats = count_peers_by_version(&guard.validation_peers); + let collation_stats = count_peers_by_version(&guard.collation_peers); + + for (version, count) in validation_stats { + metrics.note_peer_count(PeerSet::Validation, version, count) + } + + for (version, count) in collation_stats { + metrics.note_peer_count(PeerSet::Collation, version, count) + } +} + pub(crate) enum Mode { Syncing(Box<dyn SyncOracle + Send>), Active, } diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index 11ac73259e3a..0a4497fc4b5a 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -262,7 +262,6 @@ async fn handle_validation_message( } metrics.on_peer_connected(peer_set, version); - metrics.note_peer_count(peer_set, version, peer_map.len()); shared.local_view.clone().unwrap_or(View::default()) }; @@ -320,8 +319,6 @@ async fn handle_validation_message( let w = peer_map.remove(&peer).is_some(); metrics.on_peer_disconnected(peer_set, version); - metrics.note_peer_count(peer_set, version, peer_map.len()); - w }; @@ -524,7 +521,6 @@ async fn handle_collation_message( } metrics.on_peer_connected(peer_set, version); - metrics.note_peer_count(peer_set, version, peer_map.len()); shared.local_view.clone().unwrap_or(View::default()) }; @@ -575,7 +571,6 @@ async fn handle_collation_message( let w = peer_map.remove(&peer).is_some(); metrics.on_peer_disconnected(peer_set, version); - metrics.note_peer_count(peer_set, version, peer_map.len()); w }; @@ -832,6 +827,7 @@ where &metrics, &notification_sinks, ); + note_peers_count(&metrics,
&shared); } } }, diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index f0f8be0f7bab..2c7135742f56 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] bitvec = { version = "1.0.1", default-features = false, features = ["alloc"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3" gum = { package = "tracing-gum", path = "../../gum" } @@ -30,7 +30,7 @@ tokio-util = "0.7.1" [dev-dependencies] log = { workspace = true, default-features = true } -env_logger = "0.9.0" +env_logger = "0.11" assert_matches = "1.4.0" sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } @@ -41,3 +41,7 @@ parity-scale-codec = { version = "3.6.1", features = ["std"] } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } + +[features] +default = [] +elastic-scaling-experimental = [] diff --git a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs index dc1ee725462f..57e1479a449b 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs @@ -27,7 +27,8 @@ use polkadot_node_network_protocol::{ PeerId, }; use polkadot_node_primitives::PoV; -use polkadot_primitives::{CandidateHash, CandidateReceipt, Hash, HeadData, Id as ParaId}; +use polkadot_node_subsystem::messages::ParentHeadData; +use polkadot_primitives::{CandidateHash, CandidateReceipt, Hash, Id as ParaId}; /// The status of a collation as seen from the collator. pub enum CollationStatus { @@ -59,12 +60,10 @@ impl CollationStatus { pub struct Collation { /// Candidate receipt. pub receipt: CandidateReceipt, - /// Parent head-data hash. - pub parent_head_data_hash: Hash, /// Proof to verify the state transition of the parachain. pub pov: PoV, - /// Parent head-data needed for elastic scaling. - pub parent_head_data: HeadData, + /// Parent head-data (or just hash). + pub parent_head_data: ParentHeadData, /// Collation status. pub status: CollationStatus, } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index dc7997aedaa9..879caf923285 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -39,7 +39,8 @@ use polkadot_node_primitives::{CollationSecondedSignal, PoV, Statement}; use polkadot_node_subsystem::{ jaeger, messages::{ - CollatorProtocolMessage, NetworkBridgeEvent, NetworkBridgeTxMessage, RuntimeApiMessage, + CollatorProtocolMessage, NetworkBridgeEvent, NetworkBridgeTxMessage, ParentHeadData, + RuntimeApiMessage, }, overseer, CollatorProtocolSenderTrait, FromOrchestra, OverseerSignal, PerLeafSpan, }; @@ -201,20 +202,40 @@ struct PeerData { version: CollationVersion, } +/// A type wrapping a collation and its designated core index. +struct CollationWithCoreIndex(Collation, CoreIndex); + +impl CollationWithCoreIndex { + /// Returns inner collation ref. + pub fn collation(&self) -> &Collation { + &self.0 + } + + /// Returns inner collation mut ref. + pub fn collation_mut(&mut self) -> &mut Collation { + &mut self.0 + } + + /// Returns inner core index. 
+ pub fn core_index(&self) -> &CoreIndex { + &self.1 + } +} + struct PerRelayParent { prospective_parachains_mode: ProspectiveParachainsMode, - /// Validators group responsible for backing candidates built + /// Per core index validators group responsible for backing candidates built /// on top of this relay parent. - validator_group: ValidatorGroup, + validator_group: HashMap<CoreIndex, ValidatorGroup>, /// Distributed collations. - collations: HashMap<CandidateHash, Collation>, + collations: HashMap<CandidateHash, CollationWithCoreIndex>, } impl PerRelayParent { fn new(mode: ProspectiveParachainsMode) -> Self { Self { prospective_parachains_mode: mode, - validator_group: ValidatorGroup::default(), + validator_group: HashMap::default(), collations: HashMap::new(), } } @@ -348,6 +369,7 @@ async fn distribute_collation( pov: PoV, parent_head_data: HeadData, result_sender: Option<oneshot::Sender<CollationSecondedSignal>>, + core_index: CoreIndex, ) -> Result<()> { let candidate_relay_parent = receipt.descriptor.relay_parent; let candidate_hash = receipt.hash(); @@ -394,12 +416,11 @@ async fn distribute_collation( return Ok(()) } - // Determine which core the para collated-on is assigned to. + // Determine which core(s) the para collated-on is assigned to. // If it is not scheduled then ignore the message. - let (our_core, num_cores) = - match determine_core(ctx.sender(), id, candidate_relay_parent, relay_parent_mode).await? { - Some(core) => core, - None => { + let (our_cores, num_cores) = + match determine_cores(ctx.sender(), id, candidate_relay_parent, relay_parent_mode).await? { + (cores, _num_cores) if cores.is_empty() => { gum::warn!( target: LOG_TARGET, para_id = %id, @@ -408,8 +429,35 @@ async fn distribute_collation( return Ok(()) }, + (cores, num_cores) => (cores, num_cores), }; + let elastic_scaling = our_cores.len() > 1; + if elastic_scaling { + gum::debug!( + target: LOG_TARGET, + para_id = %id, + cores = ?our_cores, + "{} is assigned to {} cores at {}", id, our_cores.len(), candidate_relay_parent, + ); + } + + // Double check that the specified `core_index` is among the ones our para has assignments for. + if !our_cores.iter().any(|assigned_core| assigned_core == &core_index) { + gum::warn!( + target: LOG_TARGET, + para_id = %id, + relay_parent = ?candidate_relay_parent, + cores = ?our_cores, + ?core_index, + "Attempting to distribute collation for a core we are not assigned to ", + ); + + return Ok(()) + } + + let our_core = core_index; + // Determine the group on that core. // // When prospective parachains are disabled, candidate relay parent here is @@ -451,10 +499,12 @@ async fn distribute_collation( "Accepted collation, connecting to validators." ); - let validators_at_relay_parent = &mut per_relay_parent.validator_group.validators; - if validators_at_relay_parent.is_empty() { - *validators_at_relay_parent = validators; - } + // Insert validator group for the `core_index` at relay parent. + per_relay_parent.validator_group.entry(core_index).or_insert_with(|| { + let mut group = ValidatorGroup::default(); + group.validators = validators; + group + }); // Update a set of connected validators if necessary. 
connect_to_validators(ctx, &state.validator_groups_buf).await; @@ -463,15 +513,18 @@ async fn distribute_collation( state.collation_result_senders.insert(candidate_hash, result_sender); } + let parent_head_data = if elastic_scaling { + ParentHeadData::WithData { hash: parent_head_data_hash, head_data: parent_head_data } + } else { + ParentHeadData::OnlyHash(parent_head_data_hash) + }; + per_relay_parent.collations.insert( candidate_hash, - Collation { - receipt, - parent_head_data_hash, - pov, - parent_head_data, - status: CollationStatus::Created, - }, + CollationWithCoreIndex( + Collation { receipt, pov, parent_head_data, status: CollationStatus::Created }, + core_index, + ), ); // If prospective parachains are disabled, a leaf should be known to peer. @@ -512,15 +565,17 @@ async fn distribute_collation( Ok(()) } -/// Get the Id of the Core that is assigned to the para being collated on if any +/// Get the core indices that are assigned to the para being collated on if any /// and the total number of cores. -async fn determine_core( +async fn determine_cores( sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>, para_id: ParaId, relay_parent: Hash, relay_parent_mode: ProspectiveParachainsMode, -) -> Result<Option<(CoreIndex, usize)>> { +) -> Result<(Vec<CoreIndex>, usize)> { let cores = get_availability_cores(sender, relay_parent).await?; + let n_cores = cores.len(); + let mut assigned_cores = Vec::new(); for (idx, core) in cores.iter().enumerate() { let core_para_id = match core { @@ -537,11 +592,11 @@ async fn determine_core( }; if core_para_id == Some(para_id) { - return Ok(Some(((idx as u32).into(), cores.len()))) + assigned_cores.push(CoreIndex::from(idx as u32)); } } - Ok(None) + Ok((assigned_cores, n_cores)) } /// Validators of a particular group index. @@ -675,7 +730,10 @@ async fn advertise_collation( advertisement_timeouts: &mut FuturesUnordered<ResetInterestTimeout>, metrics: &Metrics, ) { - for (candidate_hash, collation) in per_relay_parent.collations.iter_mut() { + for (candidate_hash, collation_and_core) in per_relay_parent.collations.iter_mut() { + let core_index = *collation_and_core.core_index(); + let collation = collation_and_core.collation_mut(); + // Check that peer will be able to request the collation. 
if let CollationVersion::V1 = protocol_version { if per_relay_parent.prospective_parachains_mode.is_enabled() { @@ -689,11 +747,17 @@ async fn advertise_collation( } } - let should_advertise = - per_relay_parent - .validator_group - .should_advertise_to(candidate_hash, peer_ids, &peer); + let Some(validator_group) = per_relay_parent.validator_group.get_mut(&core_index) else { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + ?core_index, + "Skipping advertising to validator, validator group for core not found", + ); + return + }; + let should_advertise = validator_group.should_advertise_to(candidate_hash, peer_ids, &peer); match should_advertise { ShouldAdvertiseTo::Yes => {}, ShouldAdvertiseTo::NotAuthority | ShouldAdvertiseTo::AlreadyAdvertised => { @@ -724,7 +788,7 @@ async fn advertise_collation( let wire_message = protocol_v2::CollatorProtocolMessage::AdvertiseCollation { relay_parent, candidate_hash: *candidate_hash, - parent_head_data_hash: collation.parent_head_data_hash, + parent_head_data_hash: collation.parent_head_data.hash(), }; Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(wire_message)) }, @@ -741,9 +805,7 @@ async fn advertise_collation( )) .await; - per_relay_parent - .validator_group - .advertised_to_peer(candidate_hash, &peer_ids, peer); + validator_group.advertised_to_peer(candidate_hash, &peer_ids, peer); advertisement_timeouts.push(ResetInterestTimeout::new( *candidate_hash, @@ -775,6 +837,7 @@ async fn process_msg( pov, parent_head_data, result_sender, + core_index, } => { let _span1 = state .span_per_relay_parent @@ -805,6 +868,7 @@ async fn process_msg( pov, parent_head_data, result_sender, + core_index, ) .await?; }, @@ -848,7 +912,7 @@ async fn send_collation( request: VersionedCollationRequest, receipt: CandidateReceipt, pov: PoV, - _parent_head_data: HeadData, + parent_head_data: ParentHeadData, ) { let (tx, rx) = oneshot::channel(); @@ -856,20 +920,25 @@ async fn send_collation( let peer_id = request.peer_id(); let candidate_hash = receipt.hash(); - // The response payload is the same for v1 and v2 versions of protocol - // and doesn't have v2 alias for simplicity. - // For now, we don't send parent head data to the collation requester. - let result = - // if assigned_multiple_cores { - // Ok(request_v1::CollationFetchingResponse::CollationWithParentHeadData { - // receipt, - // pov, - // parent_head_data, - // }) - // } else { + #[cfg(feature = "elastic-scaling-experimental")] + let result = match parent_head_data { + ParentHeadData::WithData { head_data, .. 
} => + Ok(request_v2::CollationFetchingResponse::CollationWithParentHeadData { + receipt, + pov, + parent_head_data: head_data, + }), + ParentHeadData::OnlyHash(_) => + Ok(request_v1::CollationFetchingResponse::Collation(receipt, pov)), + }; + #[cfg(not(feature = "elastic-scaling-experimental"))] + let result = { + // suppress unused warning + let _parent_head_data = parent_head_data; + Ok(request_v1::CollationFetchingResponse::Collation(receipt, pov)) - // } - ; + }; + let response = OutgoingResponse { result, reputation_changes: Vec::new(), sent_feedback: Some(tx) }; @@ -1033,7 +1102,7 @@ async fn handle_incoming_request( }; let mode = per_relay_parent.prospective_parachains_mode; - let collation = match &req { + let collation_with_core = match &req { VersionedCollationRequest::V1(_) if !mode.is_enabled() => per_relay_parent.collations.values_mut().next(), VersionedCollationRequest::V2(req) => @@ -1050,22 +1119,24 @@ async fn handle_incoming_request( return Ok(()) }, }; - let (receipt, pov, parent_head_data) = if let Some(collation) = collation { - collation.status.advance_to_requested(); - ( - collation.receipt.clone(), - collation.pov.clone(), - collation.parent_head_data.clone(), - ) - } else { - gum::warn!( - target: LOG_TARGET, - relay_parent = %relay_parent, - "received a `RequestCollation` for a relay parent we don't have collation stored.", - ); + let (receipt, pov, parent_head_data) = + if let Some(collation_with_core) = collation_with_core { + let collation = collation_with_core.collation_mut(); + collation.status.advance_to_requested(); + ( + collation.receipt.clone(), + collation.pov.clone(), + collation.parent_head_data.clone(), + ) + } else { + gum::warn!( + target: LOG_TARGET, + relay_parent = %relay_parent, + "received a `RequestCollation` for a relay parent we don't have collation stored.", + ); - return Ok(()) - }; + return Ok(()) + }; state.metrics.on_collation_sent_requested(); @@ -1320,7 +1391,9 @@ where .remove(removed) .map(|per_relay_parent| per_relay_parent.collations) .unwrap_or_default(); - for collation in collations.into_values() { + for collation_with_core in collations.into_values() { + let collation = collation_with_core.collation(); + let candidate_hash = collation.receipt.hash(); state.collation_result_senders.remove(&candidate_hash); state.validator_groups_buf.remove_candidate(&candidate_hash); @@ -1457,7 +1530,7 @@ async fn run_inner( continue }; - let next_collation = { + let next_collation_with_core = { let per_relay_parent = match state.per_relay_parent.get(&relay_parent) { Some(per_relay_parent) => per_relay_parent, None => continue, @@ -1477,7 +1550,8 @@ async fn run_inner( } }; - if let Some(collation) = next_collation { + if let Some(collation_with_core) = next_collation_with_core { + let collation = collation_with_core.collation(); let receipt = collation.receipt.clone(); let pov = collation.pov.clone(); let parent_head_data = collation.parent_head_data.clone(); diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index beda941d37ab..de561e9f77fc 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -45,9 +45,8 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt}; use polkadot_primitives::{ - vstaging::NodeFeatures, 
AuthorityDiscoveryId, CollatorPair, ExecutorParams, GroupIndex, - GroupRotationInfo, IndexedVec, ScheduledCore, SessionIndex, SessionInfo, ValidatorId, - ValidatorIndex, + AuthorityDiscoveryId, CollatorPair, ExecutorParams, GroupIndex, GroupRotationInfo, IndexedVec, + NodeFeatures, ScheduledCore, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, }; use polkadot_primitives_test_helpers::TestCandidateBuilder; use test_helpers::mock::new_leaf; @@ -142,6 +141,21 @@ impl Default for TestState { } impl TestState { + /// Adds a few more scheduled cores to the state for the same para id + /// compared to the default. + #[cfg(feature = "elastic-scaling-experimental")] + pub fn with_elastic_scaling() -> Self { + let mut state = Self::default(); + let para_id = state.para_id; + state + .availability_cores + .push(CoreState::Scheduled(ScheduledCore { para_id, collator: None })); + state + .availability_cores + .push(CoreState::Scheduled(ScheduledCore { para_id, collator: None })); + state + } + fn current_group_validator_indices(&self) -> &[ValidatorIndex] { let core_num = self.availability_cores.len(); let GroupIndex(group_idx) = self.group_rotation_info.group_for_core(CoreIndex(0), core_num); @@ -362,6 +376,7 @@ async fn distribute_collation_with_receipt( pov: pov.clone(), parent_head_data: HeadData(vec![1, 2, 3]), result_sender: None, + core_index: CoreIndex(0), }, ) .await; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index 2bb7002a4f47..707053545630 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -277,6 +277,7 @@ fn distribute_collation_from_implicit_view() { pov: pov.clone(), parent_head_data: HeadData(vec![1, 2, 3]), result_sender: None, + core_index: CoreIndex(0), }, ) .await; @@ -358,6 +359,7 @@ fn distribute_collation_up_to_limit() { pov: pov.clone(), parent_head_data: HeadData(vec![1, 2, 3]), result_sender: None, + core_index: CoreIndex(0), }, ) .await; @@ -372,6 +374,119 @@ fn distribute_collation_up_to_limit() { ) } +/// Tests that the collator sends the parent head data in +/// case the para is assigned to multiple cores (elastic scaling). +#[test] +#[cfg(feature = "elastic-scaling-experimental")] +fn send_parent_head_data_for_elastic_scaling() { + let test_state = TestState::with_elastic_scaling(); + + let local_peer_id = test_state.local_peer_id; + let collator_pair = test_state.collator_pair.clone(); + + test_harness( + local_peer_id, + collator_pair, + ReputationAggregator::new(|_| true), + |test_harness| async move { + let mut virtual_overseer = test_harness.virtual_overseer; + let req_v1_cfg = test_harness.req_v1_cfg; + let mut req_v2_cfg = test_harness.req_v2_cfg; + + let head_b = Hash::from_low_u64_be(129); + let head_b_num: u32 = 63; + + // Set collating para id. 
+ overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::CollateOn(test_state.para_id), + ) + .await; + update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + + let pov_data = PoV { block_data: BlockData(vec![1 as u8]) }; + let candidate = TestCandidateBuilder { + para_id: test_state.para_id, + relay_parent: head_b, + pov_hash: pov_data.hash(), + ..Default::default() + } + .build(); + + let phd = HeadData(vec![1, 2, 3]); + let phdh = phd.hash(); + + distribute_collation_with_receipt( + &mut virtual_overseer, + &test_state, + head_b, + true, + candidate.clone(), + pov_data.clone(), + phdh, + ) + .await; + + let peer = test_state.validator_peer_id[0]; + let validator_id = test_state.current_group_validator_authority_ids()[0].clone(); + connect_peer( + &mut virtual_overseer, + peer, + CollationVersion::V2, + Some(validator_id.clone()), + ) + .await; + expect_declare_msg_v2(&mut virtual_overseer, &test_state, &peer).await; + + send_peer_view_change(&mut virtual_overseer, &peer, vec![head_b]).await; + let hashes: Vec<_> = vec![candidate.hash()]; + expect_advertise_collation_msg(&mut virtual_overseer, &peer, head_b, Some(hashes)) + .await; + + let (pending_response, rx) = oneshot::channel(); + req_v2_cfg + .inbound_queue + .as_mut() + .unwrap() + .send(RawIncomingRequest { + peer, + payload: request_v2::CollationFetchingRequest { + relay_parent: head_b, + para_id: test_state.para_id, + candidate_hash: candidate.hash(), + } + .encode(), + pending_response, + }) + .await + .unwrap(); + + assert_matches!( + rx.await, + Ok(full_response) => { + let response: request_v2::CollationFetchingResponse = + request_v2::CollationFetchingResponse::decode(&mut + full_response.result + .expect("We should have a proper answer").as_ref() + ).expect("Decoding should work"); + assert_matches!( + response, + request_v1::CollationFetchingResponse::CollationWithParentHeadData { + receipt, pov, parent_head_data + } => { + assert_eq!(receipt, candidate); + assert_eq!(pov, pov_data); + assert_eq!(parent_head_data, phd); + } + ); + } + ); + + TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg } + }, + ) +} + /// Tests that collator correctly handles peer V2 requests. #[test] fn advertise_and_send_collation_by_hash() { diff --git a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs index 1533f2eda5a5..fbb3ff4328a5 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs @@ -45,14 +45,22 @@ use futures::FutureExt; use polkadot_node_network_protocol::PeerId; use polkadot_primitives::{AuthorityDiscoveryId, CandidateHash, GroupIndex, SessionIndex}; +/// Elastic scaling: how many candidates per relay chain block the collator supports building. +pub const MAX_CHAINED_CANDIDATES_PER_RCB: NonZeroUsize = match NonZeroUsize::new(3) { + Some(cap) => cap, + None => panic!("max candidates per rcb cannot be zero"), +}; + /// The ring buffer stores at most this many unique validator groups. /// /// This value should be chosen in way that all groups assigned to our para -/// in the view can fit into the buffer. 
-pub const VALIDATORS_BUFFER_CAPACITY: NonZeroUsize = match NonZeroUsize::new(3) { - Some(cap) => cap, - None => panic!("buffer capacity must be non-zero"), -}; +/// in the view can fit into the buffer multiplied by amount of candidates we support per relay +/// chain block in the case of elastic scaling. +pub const VALIDATORS_BUFFER_CAPACITY: NonZeroUsize = + match NonZeroUsize::new(3 * MAX_CHAINED_CANDIDATES_PER_RCB.get()) { + Some(cap) => cap, + None => panic!("buffer capacity must be non-zero"), + }; /// Unique identifier of a validators group. #[derive(Debug)] diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index 14d59d04f2bf..ff9c302c7314 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } derive_more = "0.99.17" @@ -31,7 +31,7 @@ indexmap = "2.0.0" [dev-dependencies] async-channel = "1.8.0" -async-trait = "0.1.74" +async-trait = "0.1.79" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } sp-keyring = { path = "../../../../substrate/primitives/keyring" } sp-tracing = { path = "../../../../substrate/primitives/tracing" } diff --git a/polkadot/node/network/dispute-distribution/src/sender/mod.rs b/polkadot/node/network/dispute-distribution/src/sender/mod.rs index f4acc72318ad..8187f20146c7 100644 --- a/polkadot/node/network/dispute-distribution/src/sender/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/sender/mod.rs @@ -76,7 +76,7 @@ pub struct DisputeSender { /// Value is the hash that was used for the query. active_sessions: HashMap<SessionIndex, Hash>, - /// All ongoing dispute sendings this subsystem is aware of. + /// All ongoing dispute sending this subsystem is aware of. /// /// Using an `IndexMap` so items can be iterated in the order of insertion. disputes: IndexMap>, @@ -105,7 +105,7 @@ struct WaitForActiveDisputesState { #[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)] impl DisputeSender { - /// Create a new `DisputeSender` which can be used to start dispute sendings. + /// Create a new `DisputeSender` which can be used to start dispute sending. pub fn new(tx: NestingSender, metrics: Metrics) -> Self { Self { active_heads: Vec::new(), @@ -362,7 +362,7 @@ async fn get_active_session_indices( runtime: &mut RuntimeInfo, active_heads: &Vec<Hash>, ) -> Result<HashMap<SessionIndex, Hash>> { - let mut indeces = HashMap::new(); + let mut indices = HashMap::new(); // Iterate all heads we track as active and fetch the child' session indices. for head in active_heads { let session_index = runtime.get_session_index_for_child(ctx.sender(), *head).await?; @@ -372,9 +372,9 @@ async fn get_active_session_indices( { gum::debug!(target: LOG_TARGET, ?err, ?session_index, "Can't cache SessionInfo"); } - indeces.insert(session_index, *head); + indices.insert(session_index, *head); } - Ok(indeces) + Ok(indices) } /// Retrieve Set of active disputes from the dispute coordinator. 
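The validators_buffer.rs change above sizes the ring buffer as 3 validator groups times MAX_CHAINED_CANDIDATES_PER_RCB, with both constants built through a const match on NonZeroUsize::new so that a zero value is rejected at compile time rather than panicking at runtime. A minimal standalone sketch of that const-evaluation pattern; the names and the final assertion are illustrative, not part of the crate:

```rust
use std::num::NonZeroUsize;

// Illustrative stand-in for MAX_CHAINED_CANDIDATES_PER_RCB.
const MAX_CANDIDATES_PER_RCB: NonZeroUsize = match NonZeroUsize::new(3) {
    Some(cap) => cap,
    None => panic!("candidate limit cannot be zero"),
};

// Illustrative stand-in for VALIDATORS_BUFFER_CAPACITY: 3 groups in view
// times the per-block candidate limit. The match runs in const context,
// so a zero product becomes a compile-time error, not a runtime panic.
const BUFFER_CAPACITY: NonZeroUsize = match NonZeroUsize::new(3 * MAX_CANDIDATES_PER_RCB.get()) {
    Some(cap) => cap,
    None => panic!("buffer capacity must be non-zero"),
};

fn main() {
    assert_eq!(BUFFER_CAPACITY.get(), 9);
}
```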
diff --git a/polkadot/node/network/dispute-distribution/src/tests/mock.rs b/polkadot/node/network/dispute-distribution/src/tests/mock.rs index e6a49f14c094..ccc050233e84 100644 --- a/polkadot/node/network/dispute-distribution/src/tests/mock.rs +++ b/polkadot/node/network/dispute-distribution/src/tests/mock.rs @@ -163,7 +163,7 @@ pub fn make_dispute_message( let invalid_vote = make_explicit_signed(MOCK_VALIDATORS[invalid_validator.0 as usize], candidate_hash, false); gum::trace!( - "Passed time for invald vote: {:#?}", + "Passed time for invalid vote: {:#?}", Instant::now().saturating_duration_since(before_request) ); DisputeMessage::from_signed_statements( diff --git a/polkadot/node/network/dispute-distribution/src/tests/mod.rs b/polkadot/node/network/dispute-distribution/src/tests/mod.rs index 880d1b18032c..5ad790fb01c2 100644 --- a/polkadot/node/network/dispute-distribution/src/tests/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/tests/mod.rs @@ -57,8 +57,8 @@ use polkadot_node_subsystem_test_helpers::{ subsystem_test_harness, TestSubsystemContextHandle, }; use polkadot_primitives::{ - vstaging::NodeFeatures, AuthorityDiscoveryId, CandidateHash, CandidateReceipt, ExecutorParams, - Hash, SessionIndex, SessionInfo, + AuthorityDiscoveryId, CandidateHash, CandidateReceipt, ExecutorParams, Hash, NodeFeatures, + SessionIndex, SessionInfo, }; use self::mock::{ diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml index 8d0edc206d72..2d6f2f954c66 100644 --- a/polkadot/node/network/gossip-support/Cargo.toml +++ b/polkadot/node/network/gossip-support/Cargo.toml @@ -22,7 +22,7 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-primitives = { path = "../../../primitives" } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" rand = { version = "0.8.5", default-features = false } rand_chacha = { version = "0.3.1", default-features = false } @@ -37,7 +37,7 @@ sp-authority-discovery = { path = "../../../../substrate/primitives/authority-di polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } assert_matches = "1.4.0" -async-trait = "0.1.74" +async-trait = "0.1.79" parking_lot = "0.12.1" lazy_static = "1.4.0" quickcheck = "1.0.3" diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs index 9f33cd5d8a31..4dfdd1f7208f 100644 --- a/polkadot/node/network/gossip-support/src/lib.rs +++ b/polkadot/node/network/gossip-support/src/lib.rs @@ -508,7 +508,7 @@ where ); } let pretty = PrettyAuthorities(unconnected_authorities); - gum::info!( + gum::debug!( target: LOG_TARGET, ?connected_ratio, ?absolute_connected, diff --git a/polkadot/node/network/gossip-support/src/tests.rs b/polkadot/node/network/gossip-support/src/tests.rs index 6817c85f98d8..cce78df38f30 100644 --- a/polkadot/node/network/gossip-support/src/tests.rs +++ b/polkadot/node/network/gossip-support/src/tests.rs @@ -122,7 +122,7 @@ impl MockAuthorityDiscovery { self.authorities.lock().clone() } - fn add_more_authorties( + fn add_more_authorities( &self, new_known: Vec<AuthorityDiscoveryId>, ) -> HashMap<AuthorityDiscoveryId, HashSet<Multiaddr>> { @@ -720,7 +720,7 @@ fn issues_update_authorities_after_session() { assert!(overseer.recv().timeout(TIMEOUT).await.is_none()); // 4. 
Connect more authorities except one - let newly_added = authority_discovery_mock.add_more_authorties(unknown_at_session); + let newly_added = authority_discovery_mock.add_more_authorities(unknown_at_session); let mut newly_added_iter = newly_added.iter(); let unconnected_at_last_retry = newly_added_iter .next() diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index 7efa0b8ca820..81936364897f 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] async-channel = "1.8.0" -async-trait = "0.1.74" +async-trait = "0.1.79" hex = "0.4.3" polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } @@ -19,8 +19,8 @@ polkadot-node-jaeger = { path = "../../jaeger" } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } sc-network = { path = "../../../../substrate/client/network" } sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" } -strum = { version = "0.24", features = ["derive"] } -futures = "0.3.21" +strum = { version = "0.26.2", features = ["derive"] } +futures = "0.3.30" thiserror = { workspace = true } fatality = "0.0.6" rand = "0.8" diff --git a/polkadot/node/network/protocol/src/grid_topology.rs b/polkadot/node/network/protocol/src/grid_topology.rs index 3c4372a27a2c..a14d24610722 100644 --- a/polkadot/node/network/protocol/src/grid_topology.rs +++ b/polkadot/node/network/protocol/src/grid_topology.rs @@ -89,7 +89,7 @@ impl SessionGridTopology { SessionGridTopology { shuffled_indices, canonical_shuffling, peer_ids } } - /// Updates the known peer ids for the passed authorithies ids. + /// Updates the known peer ids for the passed authorities ids. pub fn update_authority_ids( &mut self, peer_id: PeerId, @@ -313,7 +313,7 @@ impl SessionGridTopologyEntry { self.topology.is_validator(peer) } - /// Updates the known peer ids for the passed authorithies ids. + /// Updates the known peer ids for the passed authorities ids. pub fn update_authority_ids( &mut self, peer_id: PeerId, @@ -345,7 +345,7 @@ impl SessionGridTopologies { self.inner.get(&session).and_then(|val| val.0.as_ref()) } - /// Updates the known peer ids for the passed authorithies ids. + /// Updates the known peer ids for the passed authorities ids. pub fn update_authority_ids( &mut self, peer_id: PeerId, diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs index 7a0ff9f4fa9a..4dd94b5eac4f 100644 --- a/polkadot/node/network/protocol/src/lib.rs +++ b/polkadot/node/network/protocol/src/lib.rs @@ -871,7 +871,7 @@ pub mod v2 { } /// v3 network protocol types. -/// Purpose is for chaning ApprovalDistributionMessage to +/// Purpose is for changing ApprovalDistributionMessage to /// include more than one assignment and approval in a message. pub mod v3 { use parity_scale_codec::{Decode, Encode}; diff --git a/polkadot/node/network/protocol/src/peer_set.rs b/polkadot/node/network/protocol/src/peer_set.rs index cb329607ad61..d0ae5b4a1bf3 100644 --- a/polkadot/node/network/protocol/src/peer_set.rs +++ b/polkadot/node/network/protocol/src/peer_set.rs @@ -234,7 +234,7 @@ pub enum ValidationVersion { /// The second version. V2 = 2, /// The third version where changes to ApprovalDistributionMessage had been made. 
- /// The changes are translatable to V2 format until assignments v2 and approvals + /// The changes are translatable to V2 format until assignments v2 and approvals /// coalescing is enabled through a runtime upgrade. V3 = 3, } diff --git a/polkadot/node/network/protocol/src/request_response/incoming/mod.rs b/polkadot/node/network/protocol/src/request_response/incoming/mod.rs index 445544838672..1d7c4a63e0c3 100644 --- a/polkadot/node/network/protocol/src/request_response/incoming/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/incoming/mod.rs @@ -47,7 +47,7 @@ where Req: IsRequest + Decode + Encode, Req::Response: Encode, { - /// Create configuration for `NetworkConfiguration::request_response_porotocols` and a + /// Create configuration for `NetworkConfiguration::request_response_protocols` and a /// corresponding typed receiver. /// /// This Register that config with substrate networking and receive incoming requests via the diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs index 2fb62f56d104..87217bf084fb 100644 --- a/polkadot/node/network/protocol/src/request_response/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/mod.rs @@ -287,7 +287,7 @@ impl Protocol { match self { // Hundreds of validators will start requesting their chunks once they see a candidate // awaiting availability on chain. Given that they will see that block at different - // times (due to network delays), 100 seems big enough to accomodate for "bursts", + // times (due to network delays), 100 seems big enough to accommodate for "bursts", // assuming we can service requests relatively quickly, which would need to be measured // as well. Protocol::ChunkFetchingV1 => 100, diff --git a/polkadot/node/network/protocol/src/request_response/v1.rs b/polkadot/node/network/protocol/src/request_response/v1.rs index ba29b32c4ce0..60eecb69f738 100644 --- a/polkadot/node/network/protocol/src/request_response/v1.rs +++ b/polkadot/node/network/protocol/src/request_response/v1.rs @@ -183,7 +183,7 @@ impl IsRequest for AvailableDataFetchingRequest { pub struct StatementFetchingRequest { /// Data needed to locate and identify the needed statement. pub relay_parent: Hash, - /// Hash of candidate that was used create the `CommitedCandidateRecept`. + /// Hash of candidate that was used to create the `CommittedCandidateReceipt`. 
pub candidate_hash: CandidateHash, } diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index 01f7d818c1c5..d8ae031cbf36 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } diff --git a/polkadot/node/network/statement-distribution/src/error.rs b/polkadot/node/network/statement-distribution/src/error.rs index a712ab6da436..d7f52162fe23 100644 --- a/polkadot/node/network/statement-distribution/src/error.rs +++ b/polkadot/node/network/statement-distribution/src/error.rs @@ -81,6 +81,9 @@ pub enum Error { #[error("Fetching validator groups failed {0:?}")] FetchValidatorGroups(RuntimeApiError), + #[error("Fetching claim queue failed {0:?}")] + FetchClaimQueue(runtime::Error), + #[error("Attempted to share statement when not a validator or not assigned")] InvalidShare, diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs index 81e226c4ff89..8d1683759a03 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs @@ -64,7 +64,7 @@ pub async fn respond( // late, as each requester having the data will help distributing it. // 2. If we take too long, the requests timing out will not yet have had any data sent, thus // we wasted no bandwidth. - // 3. If the queue is full, requestes will get an immediate error instead of running in a + // 3. If the queue is full, requests will get an immediate error instead of running in a // timeout, thus requesters can immediately try another peer and be faster. 
// // From this perspective we would not want parallel response sending at all, but we don't diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs index 2766ec9815af..7d355cc88725 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -43,7 +43,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers::mock::{make_ferdie_keystore, new_leaf}; use polkadot_primitives::{ - vstaging::NodeFeatures, ExecutorParams, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, + ExecutorParams, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, NodeFeatures, SessionInfo, ValidationCode, }; use polkadot_primitives_test_helpers::{ @@ -1494,7 +1494,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( Err(()) => {} ); - // And now the succeding request from peer_b: + // And now the succeeding request from peer_b: let (pending_response, response_rx) = oneshot::channel(); let inner_req = StatementFetchingRequest { relay_parent: metadata.relay_parent, diff --git a/polkadot/node/network/statement-distribution/src/v2/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/cluster.rs index c09916e56201..87cdc389cb35 100644 --- a/polkadot/node/network/statement-distribution/src/v2/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/cluster.rs @@ -430,8 +430,8 @@ impl ClusterTracker { /// /// Normally we should not have pending statements to validators in our cluster, /// but if we do for all validators in our cluster, then we don't participate - /// in backing. Ocasional pending statements are expected if two authorities - /// can't detect each otehr or after restart, where it takes a while to discover + /// in backing. Occasional pending statements are expected if two authorities + /// can't detect each other or after restart, where it takes a while to discover /// the whole network. 
pub fn warn_if_too_many_pending_statements(&self, parent_hash: Hash) { diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 2c9cdba4ea8e..f5a8ec4a2696 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -46,6 +46,7 @@ use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, reputation::ReputationAggregator, runtime::{request_min_backing_votes, ProspectiveParachainsMode}, + vstaging::fetch_claim_queue, }; use polkadot_primitives::{ AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex, CoreState, GroupIndex, @@ -112,7 +113,7 @@ const COST_EXCESSIVE_SECONDED: Rep = Rep::CostMinor("Sent Excessive `Seconded` S const COST_DISABLED_VALIDATOR: Rep = Rep::CostMinor("Sent a statement from a disabled validator"); const COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE: Rep = - Rep::CostMinor("Unexpected Manifest, missing knowlege for relay parent"); + Rep::CostMinor("Unexpected Manifest, missing knowledge for relay parent"); const COST_UNEXPECTED_MANIFEST_DISALLOWED: Rep = Rep::CostMinor("Unexpected Manifest, Peer Disallowed"); const COST_UNEXPECTED_MANIFEST_PEER_UNKNOWN: Rep = @@ -149,10 +150,9 @@ pub(crate) const REQUEST_RETRY_DELAY: Duration = Duration::from_secs(1); struct PerRelayParentState { local_validator: Option, statement_store: StatementStore, - availability_cores: Vec<CoreState>, - group_rotation_info: GroupRotationInfo, seconding_limit: usize, session: SessionIndex, + groups_per_para: HashMap<ParaId, Vec<GroupIndex>>, } impl PerRelayParentState { @@ -563,11 +563,13 @@ pub(crate) async fn handle_active_leaves_update( activated: &ActivatedLeaf, leaf_mode: ProspectiveParachainsMode, ) -> JfyiErrorResult<()> { - let seconding_limit = match leaf_mode { + let max_candidate_depth = match leaf_mode { ProspectiveParachainsMode::Disabled => return Ok(()), - ProspectiveParachainsMode::Enabled { max_candidate_depth, .. } => max_candidate_depth + 1, + ProspectiveParachainsMode::Enabled { max_candidate_depth, .. 
} => max_candidate_depth, }; + let seconding_limit = max_candidate_depth + 1; + state .implicit_view .activate_leaf(ctx.sender(), activated.hash) @@ -628,8 +630,8 @@ pub(crate) async fn handle_active_leaves_update( request_min_backing_votes(new_relay_parent, session_index, ctx.sender()).await?; let mut per_session_state = PerSessionState::new(session_info, &state.keystore, minimum_backing_votes); - if let Some(toplogy) = state.unused_topologies.remove(&session_index) { - per_session_state.supply_topology(&toplogy.topology, toplogy.local_index); + if let Some(topology) = state.unused_topologies.remove(&session_index) { + per_session_state.supply_topology(&topology.topology, topology.local_index); } state.per_session.insert(session_index, per_session_state); } @@ -693,15 +695,23 @@ pub(crate) async fn handle_active_leaves_update( } }); + let groups_per_para = determine_groups_per_para( + ctx.sender(), + new_relay_parent, + availability_cores, + group_rotation_info, + max_candidate_depth, + ) + .await; + state.per_relay_parent.insert( new_relay_parent, PerRelayParentState { local_validator, statement_store: StatementStore::new(&per_session.groups), - availability_cores, - group_rotation_info, seconding_limit, session: session_index, + groups_per_para, }, ); } @@ -2126,17 +2136,64 @@ async fn provide_candidate_to_grid( } } -fn group_for_para( - availability_cores: &[CoreState], - group_rotation_info: &GroupRotationInfo, - para_id: ParaId, -) -> Option<GroupIndex> { - // Note: this won't work well for on-demand parachains as it assumes that core assignments are - // fixed across blocks. - let core_index = availability_cores.iter().position(|c| c.para_id() == Some(para_id)); +// Utility function to populate per relay parent `ParaId` to `GroupIndex` mappings. +async fn determine_groups_per_para( + sender: &mut impl overseer::StatementDistributionSenderTrait, + relay_parent: Hash, + availability_cores: Vec<CoreState>, + group_rotation_info: GroupRotationInfo, + max_candidate_depth: usize, +) -> HashMap<ParaId, Vec<GroupIndex>> { + let maybe_claim_queue = fetch_claim_queue(sender, relay_parent) + .await + .unwrap_or_else(|err| { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + ?err, + "determine_groups_per_para: `claim_queue` API not available, falling back to iterating availability cores" + ); + None + }); + + let n_cores = availability_cores.len(); - core_index - .map(|c| group_rotation_info.group_for_core(CoreIndex(c as _), availability_cores.len())) + // Determine the core indices occupied by each para at the current relay parent. To support + // on-demand parachains we also consider the core indices at next block if core has a candidate + // pending availability. + let para_core_indices: Vec<_> = if let Some(claim_queue) = maybe_claim_queue { + claim_queue + .iter_claims_at_depth(0) + .map(|(core_index, para)| (para, core_index)) + .collect() + } else { + availability_cores + .into_iter() + .enumerate() + .filter_map(|(index, core)| match core { + CoreState::Scheduled(scheduled_core) => + Some((scheduled_core.para_id, CoreIndex(index as u32))), + CoreState::Occupied(occupied_core) => + if max_candidate_depth >= 1 { + occupied_core + .next_up_on_available + .map(|scheduled_core| (scheduled_core.para_id, CoreIndex(index as u32))) + } else { + None + }, + CoreState::Free => None, + }) + .collect() + }; + + let mut groups_per_para = HashMap::new(); + // Map from `CoreIndex` to `GroupIndex` and collect as `HashMap`. 
+ for (para, core_index) in para_core_indices { + let group_index = group_rotation_info.group_for_core(core_index, n_cores); + groups_per_para.entry(para).or_insert_with(Vec::new).push(group_index) + } + + groups_per_para } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] @@ -2192,18 +2249,23 @@ async fn fragment_tree_update_inner( let confirmed_candidate = state.candidates.get_confirmed(&candidate_hash); let prs = state.per_relay_parent.get_mut(&receipt.descriptor().relay_parent); if let (Some(confirmed), Some(prs)) = (confirmed_candidate, prs) { - let group_index = group_for_para( - &prs.availability_cores, - &prs.group_rotation_info, - receipt.descriptor().para_id, - ); - let per_session = state.per_session.get(&prs.session); - if let (Some(per_session), Some(group_index)) = (per_session, group_index) { + let group_index = confirmed.group_index(); + + // Sanity check that the group_index is valid for this para at the relay parent. + let Some(expected_groups) = prs.groups_per_para.get(&receipt.descriptor().para_id) + else { + continue + }; + if !expected_groups.iter().any(|g| *g == group_index) { + continue + } + + if let Some(per_session) = per_session { send_backing_fresh_statements( ctx, candidate_hash, - group_index, + confirmed.group_index(), &receipt.descriptor().relay_parent, prs, confirmed, @@ -2311,13 +2373,12 @@ async fn handle_incoming_manifest_common<'a, Context>( Some(x) => x, }; - let expected_group = group_for_para( - &relay_parent_state.availability_cores, - &relay_parent_state.group_rotation_info, - para_id, - ); + let Some(expected_groups) = relay_parent_state.groups_per_para.get(&para_id) else { + modify_reputation(reputation, ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; + return None + }; - if expected_group != Some(manifest_summary.claimed_group_index) { + if !expected_groups.iter().any(|g| g == &manifest_summary.claimed_group_index) { modify_reputation(reputation, ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; return None } @@ -3037,13 +3098,11 @@ pub(crate) async fn handle_response( relay_parent_state.session, |v| per_session.session_info.validators.get(v).map(|x| x.clone()), |para, g_index| { - let expected_group = group_for_para( - &relay_parent_state.availability_cores, - &relay_parent_state.group_rotation_info, - para, - ); + let Some(expected_groups) = relay_parent_state.groups_per_para.get(&para) else { + return false + }; - Some(g_index) == expected_group + expected_groups.iter().any(|g| g == &g_index) }, disabled_mask, ); diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs index bbcb268415e6..fe270c8a58e8 100644 --- a/polkadot/node/network/statement-distribution/src/v2/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -320,7 +320,7 @@ impl RequestManager { // need for the current node to limit itself to the same amount the // requests, because the requests are going to different nodes anyways. // While looking at https://github.com/paritytech/polkadot-sdk/issues/3314, - // found out that this requests take around 100ms to fullfill, so it + // found out that these requests take around 100ms to fulfill, so it // would make sense to try to request things as early as we can, given // we would need to request it for each candidate, around 25 right now // on kusama.
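The statement-distribution changes above replace the one-to-one `group_for_para` lookup with a per-relay-parent `ParaId` to `GroupIndex` multi-map: under elastic scaling a para can occupy several cores at once, so a claimed group index is accepted if it is any of the groups currently assigned to that para. Below is a minimal, self-contained sketch of that mapping and membership check. It uses plain integer stand-ins for the primitive types and an assumed simple rotation rule in place of the real `GroupRotationInfo`, so it illustrates the shape of the logic rather than the actual implementation.

// Simplified stand-ins (assumptions): the real code uses the
// polkadot-primitives ParaId/CoreIndex/GroupIndex types and
// GroupRotationInfo::group_for_core for the rotation.
use std::collections::HashMap;

type ParaId = u32;
type CoreIndex = usize;
type GroupIndex = usize;

// Assumed rotation rule: the group for a core is the core index shifted by
// the current rotation, modulo the number of cores.
fn group_for_core(core: CoreIndex, rotation: usize, n_cores: usize) -> GroupIndex {
    (core + rotation) % n_cores
}

// Mirrors the shape of `determine_groups_per_para`: each (core, para) claim
// contributes one group index, and a para claiming several cores ends up
// with several valid backing groups.
fn groups_per_para(
    claims: &[(CoreIndex, ParaId)],
    rotation: usize,
) -> HashMap<ParaId, Vec<GroupIndex>> {
    let n_cores = claims.len();
    let mut out: HashMap<ParaId, Vec<GroupIndex>> = HashMap::new();
    for &(core, para) in claims {
        out.entry(para).or_default().push(group_for_core(core, rotation, n_cores));
    }
    out
}

fn main() {
    // Para 0 claims cores 0 and 1 (elastic scaling); para 7 claims core 2.
    let claims = [(0, 0), (1, 0), (2, 7)];
    let map = groups_per_para(&claims, 0);
    assert_eq!(map[&0], vec![0, 1]);
    assert_eq!(map[&7], vec![2]);

    // A manifest is accepted if its claimed group is any of the expected
    // groups for the para, mirroring the `expected_groups.iter().any(..)`
    // checks in the hunks above.
    let claimed_group = 1;
    assert!(map[&0].iter().any(|g| *g == claimed_group));
}

As in `determine_groups_per_para`, several group indices can accumulate under one `ParaId` key, which is why the manifest and response paths now check membership in a set of expected groups instead of equality with a single expected group.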
diff --git a/polkadot/node/network/statement-distribution/src/v2/statement_store.rs b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs index 022461e55511..a3b2636d2ffc 100644 --- a/polkadot/node/network/statement-distribution/src/v2/statement_store.rs +++ b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs @@ -292,7 +292,7 @@ impl GroupStatements { mod tests { use super::*; - use polkadot_primitives::v6::{Hash, SigningContext, ValidatorPair}; + use polkadot_primitives::v7::{Hash, SigningContext, ValidatorPair}; use sp_application_crypto::Pair as PairT; #[test] diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs index a944a9cd6d02..4fb033e08ce3 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs @@ -312,6 +312,66 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { }); } +// Both validators in the test are part of backing groups assigned to the same parachain +#[test] +fn elastic_scaling_useful_cluster_statement_from_non_cluster_peer_rejected() { + let config = TestConfig { + validator_count: 20, + group_size: 3, + local_validator: LocalRole::Validator, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + let test_leaf = state.make_dummy_leaf_with_multiple_cores_per_para(relay_parent, 3); + + // Peer A is not in our group, but its group is assigned to the same para as ours. + let not_our_group = GroupIndex(1); + + let that_group_validators = state.group_validators(not_our_group, false); + let v_non = that_group_validators[0]; + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_non)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + activate_leaf(&mut overseer, &test_leaf, &state, true, vec![]).await; + + let statement = state + .sign_statement( + v_non, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == COST_UNEXPECTED_STATEMENT_INVALID_SENDER.into() => { } + ); + + overseer + }); +} + #[test] fn statement_from_non_cluster_originator_unexpected() { let config = TestConfig { diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index 38a12cf32e3b..9d00a92e742b 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -1829,9 +1829,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { }); } -// Grid statements imported to backing once candidate enters hypothetical frontier.
-#[test] -fn grid_statements_imported_to_backing() { +fn inner_grid_statements_imported_to_backing(groups_for_first_para: usize) { let validator_count = 6; let group_size = 3; let config = TestConfig { @@ -1851,9 +1849,12 @@ fn grid_statements_imported_to_backing() { let local_group_index = local_validator.group_index.unwrap(); let other_group = next_group_index(local_group_index, validator_count, group_size); - let other_para = ParaId::from(other_group.0); - let test_leaf = state.make_dummy_leaf(relay_parent); + // Other para is the same para for the elastic scaling test (groups_for_first_para > 1) + let other_para = ParaId::from((groups_for_first_para == 1) as u32); + + let test_leaf = + state.make_dummy_leaf_with_multiple_cores_per_para(relay_parent, groups_for_first_para); let (candidate, pvd) = make_candidate( relay_parent, @@ -2018,6 +2019,18 @@ fn grid_statements_imported_to_backing() { overseer }); } +// Grid statements imported to backing once candidate enters hypothetical frontier. +#[test] +fn grid_statements_imported_to_backing() { + inner_grid_statements_imported_to_backing(1); +} + +// Grid statements imported to backing once candidate enters hypothetical frontier. +// All statements are for candidates of the same parachain but from different backing groups. +#[test] +fn elastic_scaling_grid_statements_imported_to_backing() { + inner_grid_statements_imported_to_backing(2); +} #[test] fn advertisements_rejected_from_incorrect_peers() { diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 82986a0330ec..e98b11079312 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -177,20 +177,39 @@ impl TestState { } fn make_dummy_leaf(&self, relay_parent: Hash) -> TestLeaf { + self.make_dummy_leaf_with_multiple_cores_per_para(relay_parent, 1) + } + + fn make_dummy_leaf_with_multiple_cores_per_para( + &self, + relay_parent: Hash, + groups_for_first_para: usize, + ) -> TestLeaf { TestLeaf { number: 1, hash: relay_parent, parent_hash: Hash::repeat_byte(0), session: 1, availability_cores: self.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) + let para_id = if i < groups_for_first_para { + ParaId::from(0u32) + } else { + ParaId::from(i as u32) + }; + + CoreState::Scheduled(ScheduledCore { para_id, collator: None }) }), disabled_validators: Default::default(), para_data: (0..self.session_info.validator_groups.len()) - .map(|i| (ParaId::from(i as u32), PerParaData::new(1, vec![1, 2, 3].into()))) + .map(|i| { + let para_id = if i < groups_for_first_para { + ParaId::from(0u32) + } else { + ParaId::from(i as u32) + }; + + (para_id, PerParaData::new(1, vec![1, 2, 3].into())) + }) .collect(), minimum_backing_votes: 2, } diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index f91ec80d9440..ef79cfe2f702 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] client = { package = "sc-client-api", path = "../../../substrate/client/api" } sp-api = { path = "../../../substrate/primitives/api" } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" parking_lot = "0.12.1" polkadot-node-network-protocol = { path = "../network/protocol" } @@ -23,13 +23,13 @@ polkadot-primitives = { path = "../../primitives" } orchestra = {
version = "0.3.5", default-features = false, features = ["futures_channel"] } gum = { package = "tracing-gum", path = "../gum" } sp-core = { path = "../../../substrate/primitives/core" } -async-trait = "0.1.74" +async-trait = "0.1.79" tikv-jemalloc-ctl = { version = "0.5.0", optional = true } [dev-dependencies] metered = { package = "prioritized-metered-channel", version = "0.6.1", default-features = false, features = ["futures_channel"] } sp-core = { path = "../../../substrate/primitives/core" } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } femme = "2.2.1" assert_matches = "1.4.0" test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs index e16a3fd27ab3..167b32a15bc4 100644 --- a/polkadot/node/overseer/src/lib.rs +++ b/polkadot/node/overseer/src/lib.rs @@ -871,7 +871,7 @@ where gum::trace!( target: LOG_TARGET, relay_parent = ?hash, - "Leaf got activated, notifying exterinal listeners" + "Leaf got activated, notifying external listeners" ); for listener in listeners { // it's fine if the listener is no longer interested diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index 0494274367d9..55a6bdb74ba7 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -811,7 +811,7 @@ fn test_candidate_validation_msg() -> CandidateValidationMessage { fn test_candidate_backing_msg() -> CandidateBackingMessage { let (sender, _) = oneshot::channel(); - CandidateBackingMessage::GetBackedCandidates(Vec::new(), sender) + CandidateBackingMessage::GetBackedCandidates(Default::default(), sender) } fn test_chain_api_msg() -> ChainApiMessage { diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index b4541bcc346c..a4bbd824e671 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] bounded-vec = "0.7" -futures = "0.3.21" +futures = "0.3.30" polkadot-primitives = { path = "../../primitives" } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } sp-core = { path = "../../../substrate/primitives/core" } diff --git a/polkadot/node/primitives/src/approval.rs b/polkadot/node/primitives/src/approval.rs index f2a79e025aff..b73cb4c717db 100644 --- a/polkadot/node/primitives/src/approval.rs +++ b/polkadot/node/primitives/src/approval.rs @@ -382,7 +382,7 @@ pub mod v2 { /// The core index chosen in this cert. core_index: CoreIndex, }, - /// Deprectated assignment. Soon to be removed. + /// Deprecated assignment. Soon to be removed. /// An assignment story based on the VRF that authorized the relay-chain block where the /// candidate was included combined with a sample number. /// diff --git a/polkadot/node/primitives/src/disputes/mod.rs b/polkadot/node/primitives/src/disputes/mod.rs index 768b95f65537..5814ecee44f4 100644 --- a/polkadot/node/primitives/src/disputes/mod.rs +++ b/polkadot/node/primitives/src/disputes/mod.rs @@ -84,7 +84,7 @@ impl CandidateVotes { #[derive(Debug, Clone)] /// Valid candidate votes. /// -/// Prefere backing votes over other votes. +/// Prefer backing votes over other votes. pub struct ValidCandidateVotes { votes: BTreeMap, } @@ -133,7 +133,7 @@ impl ValidCandidateVotes { self.votes.retain(f) } - /// Get all the validator indeces we have votes for. 
+ /// Get all the validator indices we have votes for. pub fn keys( &self, ) -> Bkeys<'_, ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature)> { diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index d295c21cce1d..b127d87d4ea4 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -31,8 +31,8 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use polkadot_primitives::{ BlakeTwo256, BlockNumber, CandidateCommitments, CandidateHash, CollatorPair, - CommittedCandidateReceipt, CompactStatement, EncodeAs, Hash, HashT, HeadData, Id as ParaId, - PersistedValidationData, SessionIndex, Signed, UncheckedSigned, ValidationCode, + CommittedCandidateReceipt, CompactStatement, CoreIndex, EncodeAs, Hash, HashT, HeadData, + Id as ParaId, PersistedValidationData, SessionIndex, Signed, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorIndex, MAX_CODE_SIZE, MAX_POV_SIZE, }; pub use sp_consensus_babe::{ @@ -58,7 +58,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. -pub const NODE_VERSION: &'static str = "1.8.0"; +pub const NODE_VERSION: &'static str = "1.9.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: @@ -74,7 +74,7 @@ pub const VALIDATION_CODE_BOMB_LIMIT: usize = (MAX_CODE_SIZE * 4u32) as usize; pub const POV_BOMB_LIMIT: usize = (MAX_POV_SIZE * 4u32) as usize; /// How many blocks after finalization an information about backed/included candidate should be -/// pre-loaded (when scraoing onchain votes) and kept locally (when pruning). +/// pre-loaded (when scraping onchain votes) and kept locally (when pruning). /// /// We don't want to remove scraped candidates on finalization because we want to /// be sure that disputes will conclude on abandoned forks. @@ -524,6 +524,8 @@ pub struct SubmitCollationParams { /// okay to just drop it. However, if it is called, it should be called with the signed /// statement of a parachain validator seconding the collation. pub result_sender: Option>, + /// The core index on which the resulting candidate should be backed + pub core_index: CoreIndex, } /// This is the data we keep available for each candidate included in the relay chain. 
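The disputes hunk above keeps the doc comment stating that `ValidCandidateVotes` prefers backing votes over other votes. The following is a minimal sketch of such a preference rule under simplified, assumed types: the `VoteKind` enum and the insert semantics are illustrative stand-ins, not the actual `ValidDisputeStatementKind` API.

use std::collections::BTreeMap;

type ValidatorIndex = u32;

// Assumed, simplified vote kinds; the real code distinguishes backing
// statements (seconded/valid) from explicit dispute votes.
#[derive(Clone, Copy)]
enum VoteKind {
    BackingSeconded,
    BackingValid,
    Explicit,
}

impl VoteKind {
    fn is_backing(&self) -> bool {
        !matches!(self, VoteKind::Explicit)
    }
}

#[derive(Default)]
struct ValidVotes {
    // Keyed by validator index, mirroring the `votes: BTreeMap` field shown
    // in the hunk above.
    votes: BTreeMap<ValidatorIndex, VoteKind>,
}

impl ValidVotes {
    // Insert a vote; never let a non-backing vote replace a backing one.
    // Returns whether the stored set of votes changed.
    fn insert_vote(&mut self, validator: ValidatorIndex, kind: VoteKind) -> bool {
        match self.votes.get(&validator) {
            Some(existing) if existing.is_backing() => false, // keep the backing vote
            Some(_) if !kind.is_backing() => false,           // nothing gained
            _ => {
                self.votes.insert(validator, kind);
                true
            },
        }
    }
}

fn main() {
    let mut votes = ValidVotes::default();
    assert!(votes.insert_vote(1, VoteKind::Explicit));
    // A backing vote upgrades a previously seen explicit vote...
    assert!(votes.insert_vote(1, VoteKind::BackingValid));
    // ...but an explicit vote never downgrades a stored backing vote.
    assert!(!votes.insert_vote(1, VoteKind::Explicit));
    assert!(votes.insert_vote(2, VoteKind::BackingSeconded));
}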
diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 734dcbdeb441..9688ab556473 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -65,7 +65,6 @@ sp-version = { path = "../../../substrate/primitives/version" } # Substrate Pallets pallet-babe = { path = "../../../substrate/frame/babe" } -pallet-im-online = { path = "../../../substrate/frame/im-online" } pallet-staking = { path = "../../../substrate/frame/staking" } pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api" } frame-system = { path = "../../../substrate/frame/system" } @@ -78,8 +77,8 @@ frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-c frame-benchmarking = { path = "../../../substrate/frame/benchmarking" } # External Crates -async-trait = "0.1.74" -futures = "0.3.21" +async-trait = "0.1.79" +futures = "0.3.30" hex-literal = "0.4.1" is_executable = "1.0.1" gum = { package = "tracing-gum", path = "../gum" } @@ -142,11 +141,14 @@ polkadot-node-core-pvf-checker = { path = "../core/pvf-checker", optional = true polkadot-node-core-runtime-api = { path = "../core/runtime-api", optional = true } polkadot-statement-distribution = { path = "../network/statement-distribution", optional = true } +xcm = { package = "staging-xcm", path = "../../xcm" } +xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api" } + [dev-dependencies] polkadot-test-client = { path = "../test/client" } polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } -env_logger = "0.9.0" +env_logger = "0.11" assert_matches = "1.5.0" serial_test = "2.0.0" tempfile = "3.2" @@ -194,7 +196,6 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-babe/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", @@ -210,7 +211,6 @@ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-babe/try-runtime", - "pallet-im-online/try-runtime", "pallet-staking/try-runtime", "pallet-transaction-payment/try-runtime", "polkadot-runtime-parachains/try-runtime", @@ -218,10 +218,7 @@ try-runtime = [ "sp-runtime/try-runtime", "westend-runtime?/try-runtime", ] -fast-runtime = [ - "rococo-runtime?/fast-runtime", - "westend-runtime?/fast-runtime", -] +fast-runtime = ["rococo-runtime?/fast-runtime", "westend-runtime?/fast-runtime"] malus = ["full-node"] runtime-metrics = [ @@ -229,3 +226,7 @@ runtime-metrics = [ "rococo-runtime?/runtime-metrics", "westend-runtime?/runtime-metrics", ] + +elastic-scaling-experimental = [ + "polkadot-collator-protocol?/elastic-scaling-experimental", +] diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json index fd60cb8b6c1d..490b39ee6969 100644 --- a/polkadot/node/service/chain-specs/kusama.json +++ b/polkadot/node/service/chain-specs/kusama.json @@ -35,6 +35,7 @@ "/dns/ksm14.rotko.net/tcp/35224/wss/p2p/12D3KooWAa5THTw8HPfnhEei23HdL8P9McBXdozG2oTtMMksjZkK", "/dns/ksm14.rotko.net/tcp/33224/p2p/12D3KooWAa5THTw8HPfnhEei23HdL8P9McBXdozG2oTtMMksjZkK", "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30333/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT", + 
"/dns/boot-kusama.luckyfriday.io/tcp/443/wss/p2p/12D3KooWS1Lu6DmK8YHSvkErpxpcXmk14vG6y4KVEFEkd9g62PP8", "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30334/wss/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT" ], "telemetryEndpoints": [ diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index c235cdd09d8b..5f8d88102d7e 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -36,7 +36,8 @@ "/dns/dot14.rotko.net/tcp/35214/wss/p2p/12D3KooWPyEvPEXghnMC67Gff6PuZiSvfx3fmziKiPZcGStZ5xff", "/dns/dot14.rotko.net/tcp/33214/p2p/12D3KooWPyEvPEXghnMC67Gff6PuZiSvfx3fmziKiPZcGStZ5xff", "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30333/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ", - "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30334/wss/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ" + "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30334/wss/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ", + "/dns/boot-polkadot.luckyfriday.io/tcp/443/wss/p2p/12D3KooWAdyiVAaeGdtBt6vn5zVetwA4z4qfm9Fi2QCSykN1wTBJ" ], "telemetryEndpoints": [ [ diff --git a/polkadot/node/service/src/benchmarking.rs b/polkadot/node/service/src/benchmarking.rs index 400daf1aee34..a0c4d3b04469 100644 --- a/polkadot/node/service/src/benchmarking.rs +++ b/polkadot/node/service/src/benchmarking.rs @@ -222,7 +222,7 @@ fn westend_sign_call( runtime::UncheckedExtrinsic::new_signed( call, sp_runtime::AccountId32::from(acc.public()).into(), - polkadot_core_primitives::Signature::Sr25519(signature.clone()), + polkadot_core_primitives::Signature::Sr25519(signature), extra, ) .into() @@ -274,7 +274,7 @@ fn rococo_sign_call( runtime::UncheckedExtrinsic::new_signed( call, sp_runtime::AccountId32::from(acc.public()).into(), - polkadot_core_primitives::Signature::Sr25519(signature.clone()), + polkadot_core_primitives::Signature::Sr25519(signature), extra, ) .into() diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index 1c44b17b6fd2..1b6ba99777b0 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -123,7 +123,8 @@ fn default_parachains_host_configuration( ) -> polkadot_runtime_parachains::configuration::HostConfiguration { use polkadot_primitives::{ - vstaging::node_features::FeatureIndex, AsyncBackingParams, MAX_CODE_SIZE, MAX_POV_SIZE, + node_features::FeatureIndex, ApprovalVotingParams, AsyncBackingParams, MAX_CODE_SIZE, + MAX_POV_SIZE, }; polkadot_runtime_parachains::configuration::HostConfiguration { @@ -158,7 +159,8 @@ fn default_parachains_host_configuration( allowed_ancestry_len: 2, }, node_features: bitvec::vec::BitVec::from_element( - 1u8 << (FeatureIndex::ElasticScalingMVP as usize), + 1u8 << (FeatureIndex::ElasticScalingMVP as usize) | + 1u8 << (FeatureIndex::EnableAssignmentsV2 as usize), ), scheduler_params: SchedulerParams { lookahead: 2, @@ -166,6 +168,7 @@ fn default_parachains_host_configuration( paras_availability_period: 4, ..Default::default() }, + approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 5 }, ..Default::default() } } diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index 085ea93fdc78..c6cfb7a27d04 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -30,6 +30,7 @@ use polkadot_primitives::{ ScrapedOnChainVotes, SessionIndex, SessionInfo, 
ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; + use sp_core::OpaqueMetadata; use sp_runtime::{ traits::Block as BlockT, @@ -39,7 +40,7 @@ use sp_runtime::{ use sp_version::RuntimeVersion; use sp_weights::Weight; use std::collections::BTreeMap; - +use xcm::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; sp_api::decl_runtime_apis! { /// This runtime API is only implemented for the test runtime! pub trait GetLastTimestamp { @@ -396,4 +397,22 @@ sp_api::impl_runtime_apis! { unimplemented!() } } + + impl xcm_fee_payment_runtime_api::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(_: xcm::Version) -> Result, xcm_fee_payment_runtime_api::Error> { + unimplemented!() + } + + fn query_weight_to_asset_fee(_: Weight, _: VersionedAssetId) -> Result { + unimplemented!() + } + + fn query_xcm_weight(_: VersionedXcm<()>) -> Result { + unimplemented!() + } + + fn query_delivery_fees(_: VersionedLocation, _: VersionedXcm<()>) -> Result { + unimplemented!() + } + } } diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 8d04e905e848..8a30f19cc36f 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -809,6 +809,7 @@ pub fn new_full( let shared_voter_state = rpc_setup; let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; + let auth_disc_public_addresses = config.network.public_addresses.clone(); let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); @@ -1063,6 +1064,7 @@ pub fn new_full( let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( sc_authority_discovery::WorkerConfig { publish_non_global_ips: auth_disc_publish_non_global_ips, + public_addresses: auth_disc_public_addresses, // Require that authority discovery records are signed. strict_record_validation: true, ..Default::default() @@ -1233,6 +1235,7 @@ pub fn new_full( prometheus_registry: prometheus_registry.clone(), links: beefy_links, on_demand_justifications_handler: beefy_on_demand_justifications_handler, + is_authority: role.is_authority(), }; let gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); @@ -1242,18 +1245,18 @@ pub fn new_full( task_manager .spawn_essential_handle() .spawn_blocking("beefy-gadget", None, gadget); - // When offchain indexing is enabled, MMR gadget should also run. - if is_offchain_indexing_enabled { - task_manager.spawn_essential_handle().spawn_blocking( - "mmr-gadget", - None, - MmrGadget::start( - client.clone(), - backend.clone(), - sp_mmr_primitives::INDEXING_PREFIX.to_vec(), - ), - ); - } + } + // When offchain indexing is enabled, MMR gadget should also run. + if is_offchain_indexing_enabled { + task_manager.spawn_essential_handle().spawn_blocking( + "mmr-gadget", + None, + MmrGadget::start( + client.clone(), + backend.clone(), + sp_mmr_primitives::INDEXING_PREFIX.to_vec(), + ), + ); } let config = grandpa::Config { diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index a81673704649..9575b2458a25 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -77,7 +77,7 @@ pub struct OverseerGenArgs<'a, Spawner, RuntimeClient> where Spawner: 'static + SpawnNamed + Clone + Unpin, { - /// Runtime client generic, providing the `ProvieRuntimeApi` trait besides others. 
+ /// Runtime client generic, providing the `ProvideRuntimeApi` trait besides others. pub runtime_client: Arc, /// Underlying network service implementation. pub network_service: Arc>, diff --git a/polkadot/node/service/src/parachains_db/upgrade.rs b/polkadot/node/service/src/parachains_db/upgrade.rs index 2eceb391b15c..4d7370859609 100644 --- a/polkadot/node/service/src/parachains_db/upgrade.rs +++ b/polkadot/node/service/src/parachains_db/upgrade.rs @@ -115,7 +115,7 @@ pub(crate) fn try_upgrade_db_to_next_version( Some(CURRENT_VERSION) => CURRENT_VERSION, // This is an arbitrary future version, we don't handle it. Some(v) => return Err(Error::FutureVersion { current: CURRENT_VERSION, got: v }), - // No version file. For `RocksDB` we dont need to do anything. + // No version file. For `RocksDB` we don't need to do anything. None if db_kind == DatabaseKind::RocksDB => CURRENT_VERSION, // No version file. `ParityDB` did not previously have a version defined. // We handle this as a `0 -> 1` migration. @@ -183,7 +183,7 @@ fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result { // The total lag accounting for disputes. let lag_disputes = initial_leaf_number.saturating_sub(subchain_number); diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index 71711ad0fbd0..37224d110e88 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -35,12 +35,12 @@ color-eyre = { version = "0.6.1", default-features = false } polkadot-overseer = { path = "../overseer" } colored = "2.0.4" assert_matches = "1.5" -async-trait = "0.1.57" +async-trait = "0.1.79" sp-keystore = { path = "../../../substrate/primitives/keystore" } sc-keystore = { path = "../../../substrate/client/keystore" } sp-core = { path = "../../../substrate/primitives/core" } clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" bincode = "1.3.3" sha1 = "0.10.6" @@ -48,7 +48,7 @@ hex = "0.4.3" gum = { package = "tracing-gum", path = "../gum" } polkadot-erasure-coding = { package = "polkadot-erasure-coding", path = "../../erasure-coding" } log = { workspace = true, default-features = true } -env_logger = "0.9.0" +env_logger = "0.11" rand = "0.8.5" # `rand` only supports uniform distribution, we need normal distribution for latency. rand_distr = "0.4.3" @@ -56,7 +56,7 @@ bitvec = "1.0.1" kvdb-memorydb = "0.13.0" parity-scale-codec = { version = "3.6.1", features = ["derive", "std"] } -tokio = "1.24.2" +tokio = { version = "1.24.2", features = ["parking_lot", "rt-multi-thread"] } clap-num = "1.0.2" polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } sp-keyring = { path = "../../../substrate/primitives/keyring" } @@ -71,6 +71,7 @@ prometheus_endpoint = { package = "substrate-prometheus-endpoint", path = "../.. 
prometheus = { version = "0.13.0", default-features = false } serde = { workspace = true, default-features = true } serde_yaml = { workspace = true } +serde_json = { workspace = true } polkadot-node-core-approval-voting = { path = "../core/approval-voting" } polkadot-approval-distribution = { path = "../network/approval-distribution" } diff --git a/polkadot/node/subsystem-bench/grafana/availability-read.json b/polkadot/node/subsystem-bench/grafana/availability-read.json index 31c4ad3c7952..96a83d2d70fe 100644 --- a/polkadot/node/subsystem-bench/grafana/availability-read.json +++ b/polkadot/node/subsystem-bench/grafana/availability-read.json @@ -119,7 +119,7 @@ "editorMode": "code", "expr": "subsystem_benchmark_n_validators{}", "instant": false, - "legendFormat": "n_vaidators", + "legendFormat": "n_validators", "range": true, "refId": "A" }, @@ -1046,7 +1046,7 @@ "refId": "A" } ], - "title": "Availability subystem metrics", + "title": "Availability subsystem metrics", "type": "row" }, { @@ -1397,7 +1397,7 @@ "refId": "B" } ], - "title": "Recovery throughtput", + "title": "Recovery throughput", "transformations": [], "type": "timeseries" }, diff --git a/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs index deb351360d74..10953b6c7839 100644 --- a/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs +++ b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs @@ -136,31 +136,29 @@ impl BenchCli { let usage = match objective { TestObjective::DataAvailabilityRead(opts) => { - let mut state = availability::TestState::new(&test_config); + let state = availability::TestState::new(&test_config); let (mut env, _protocol_config) = availability::prepare_test( - test_config, - &mut state, + &state, availability::TestDataAvailability::Read(opts), true, ); env.runtime().block_on(availability::benchmark_availability_read( &benchmark_name, &mut env, - state, + &state, )) }, TestObjective::DataAvailabilityWrite => { - let mut state = availability::TestState::new(&test_config); + let state = availability::TestState::new(&test_config); let (mut env, _protocol_config) = availability::prepare_test( - test_config, - &mut state, + &state, availability::TestDataAvailability::Write, true, ); env.runtime().block_on(availability::benchmark_availability_write( &benchmark_name, &mut env, - state, + &state, )) }, TestObjective::ApprovalVoting(ref options) => { diff --git a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs index c1b31a509f6d..619a3617ca4d 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs @@ -41,8 +41,8 @@ use polkadot_node_primitives::approval::{ v2::{CoreBitfield, IndirectAssignmentCertV2, IndirectSignedApprovalVoteV2}, }; use polkadot_primitives::{ - vstaging::ApprovalVoteMultipleCandidates, CandidateEvent, CandidateHash, CandidateIndex, - CoreIndex, Hash, SessionInfo, Slot, ValidatorId, ValidatorIndex, ASSIGNMENT_KEY_TYPE_ID, + ApprovalVoteMultipleCandidates, CandidateEvent, CandidateHash, CandidateIndex, CoreIndex, Hash, + SessionInfo, Slot, ValidatorId, ValidatorIndex, ASSIGNMENT_KEY_TYPE_ID, }; use rand::{seq::SliceRandom, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; @@ -74,8 +74,8 @@ pub struct PeerMessagesGenerator { pub validator_index: ValidatorIndex, /// An array of pre-generated random samplings, that is used to determine, 
which nodes would /// send a given assignment, to the node under test because of the random samplings. - /// As an optimization we generate this sampling at the begining of the test and just pick - /// one randomly, because always taking the samples would be too expensive for benchamrk. + /// As an optimization we generate this sampling at the beginning of the test and just pick + /// one randomly, because always taking the samples would be too expensive for the benchmark. pub random_samplings: Vec>, /// Channel for sending the generated messages to the aggregator pub tx_messages: futures::channel::mpsc::UnboundedSender<(Hash, Vec)>, @@ -234,7 +234,7 @@ impl PeerMessagesGenerator { let all_messages = all_messages .into_iter() .flat_map(|(_, mut messages)| { - // Shuffle the messages inside the same tick, so that we don't priorites messages + // Shuffle the messages inside the same tick, so that we don't prioritize messages // for older nodes. we try to simulate the same behaviour as in real world. messages.shuffle(&mut rand_chacha); messages @@ -560,12 +560,12 @@ struct TestSignInfo { candidate_index: CandidateIndex, /// The validator sending the assignments validator_index: ValidatorIndex, - /// The assignments convering this candidate + /// The assignments covering this candidate assignment: TestMessageInfo, } impl TestSignInfo { - /// Helper function to create a signture for all candidates in `to_sign` parameter. + /// Helper function to create a signature for all candidates in the `to_sign` parameter. /// Returns a TestMessage fn sign_candidates( to_sign: &mut Vec, diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs index 450faf06123f..6ab5b86baede 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -115,7 +115,7 @@ pub struct ApprovalsOptions { #[clap(short, long, default_value_t = 1.0)] /// Max candidate to be signed in a single approval. pub coalesce_std_dev: f32, - /// The maximum tranche diff between approvals coalesced toghther. + /// The maximum tranche diff between approvals coalesced together. pub coalesce_tranche_diff: u32, #[clap(short, long, default_value_t = false)] /// Enable assignments v2. @@ -170,7 +170,7 @@ struct BlockTestData { total_candidates_before: u64, /// The votes we sent. /// votes[validator_index][candidate_index] tells if validator sent vote for candidate. - /// We use this to mark the test as succesfull if GetApprovalSignatures returns all the votes + /// We use this to mark the test as successful if GetApprovalSignatures returns all the votes /// from here. votes: Arc>>, } @@ -237,7 +237,7 @@ struct GeneratedState { } /// Approval test state used by all mock subsystems to be able to answer messages emitted -/// by the approval-voting and approval-distribution-subystems. +/// by the approval-voting and approval-distribution-subsystems. /// /// This gets cloned across all mock subsystems, so if there is any information that gets /// updated between subsystems, they would have to be wrapped in Arc's. @@ -498,7 +498,7 @@ struct PeerMessageProducer { impl PeerMessageProducer { /// Generates messages by spawning a blocking task in the background which begins creating - /// the assignments/approvals and peer view changes at the begining of each block. + /// the assignments/approvals and peer view changes at the beginning of each block.
fn produce_messages( mut self, env: &TestEnvironment, @@ -740,7 +740,7 @@ impl PeerMessageProducer { } } - // Initializes the candidates test data. This is used for bookeeping if more assignments and + // Initializes the candidates test data. This is used for bookkeeping if more assignments and // approvals would be needed. fn initialize_candidates_test_data( &self, @@ -767,7 +767,7 @@ impl PeerMessageProducer { } /// Helper function to build an overseer with the real implementation for `ApprovalDistribution` and -/// `ApprovalVoting` subystems and mock subsytems for all others. +/// `ApprovalVoting` subsystems and mock subsystems for all others. fn build_overseer( state: &ApprovalTestState, network: &NetworkEmulatorHandle, @@ -936,7 +936,7 @@ pub async fn bench_approvals_run( for block_num in 0..env.config().num_blocks { let mut current_slot = tick_to_slot_number(SLOT_DURATION_MILLIS, system_clock.tick_now()); - // Wait untill the time arrieves at the first slot under test. + // Wait until the time arrives at the first slot under test. while current_slot < state.generated_state.initial_slot { sleep(Duration::from_millis(5)).await; current_slot = tick_to_slot_number(SLOT_DURATION_MILLIS, system_clock.tick_now()); @@ -961,7 +961,7 @@ pub async fn bench_approvals_run( } // Wait for all blocks to be approved before exiting. - // This is an invariant of the benchmark, if this does not happen something went teribbly wrong. + // This is an invariant of the benchmark; if this does not happen, something went terribly wrong. while state.last_approved_block.load(std::sync::atomic::Ordering::SeqCst) < env.config().num_blocks as u32 { diff --git a/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs index 63e383509be9..f55ed99205ed 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs @@ -31,7 +31,7 @@ pub struct TestMessageInfo { pub msg: protocol_v3::ApprovalDistributionMessage, /// The list of peers that would sends this message in a real topology. /// It includes both the peers that would send the message because of the topology - /// or because of randomly chosing so. + /// or because of being randomly chosen to do so. pub sent_by: Vec, /// The tranche at which this message should be sent. pub tranche: u32, @@ -90,7 +90,7 @@ impl MessagesBundle { /// Tells if the bundle is needed for sending. /// We either send it because we need more assignments and approvals to approve the candidates - /// or because we configured the test to send messages untill a given tranche. + /// or because we configured the test to send messages until a given tranche.
pub fn should_send( &self, candidates_test_data: &HashMap<(Hash, CandidateIndex), CandidateTestData>, @@ -174,24 +174,24 @@ impl TestMessageInfo { } } - /// Returns a list of candidates indicies in this message + /// Returns a list of candidate indices in this message pub fn candidate_indices(&self) -> HashSet { - let mut unique_candidate_indicies = HashSet::new(); + let mut unique_candidate_indices = HashSet::new(); match &self.msg { protocol_v3::ApprovalDistributionMessage::Assignments(assignments) => for (_assignment, candidate_indices) in assignments { for candidate_index in candidate_indices.iter_ones() { - unique_candidate_indicies.insert(candidate_index); + unique_candidate_indices.insert(candidate_index); } }, protocol_v3::ApprovalDistributionMessage::Approvals(approvals) => for approval in approvals { for candidate_index in approval.candidate_indices.iter_ones() { - unique_candidate_indicies.insert(candidate_index); + unique_candidate_indices.insert(candidate_index); } }, } - unique_candidate_indicies + unique_candidate_indices } /// Marks this message as no-shows if the number of configured no-shows is above the registered diff --git a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs index dc4e1e403102..fe9866690618 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -15,11 +15,11 @@ // along with Polkadot. If not, see . use crate::{ - configuration::TestConfiguration, + availability::av_store_helpers::new_av_store, dummy_builder, environment::{TestEnvironment, TestEnvironmentDependencies, GENESIS_HASH}, mock::{ - av_store::{self, MockAvailabilityStore}, + av_store::{self, MockAvailabilityStore, NetworkAvailabilityState}, chain_api::{ChainApiState, MockChainApi}, network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx}, runtime_api::{self, MockRuntimeApi}, @@ -28,12 +28,8 @@ use crate::{ network::new_network, usage::BenchmarkUsage, }; -use av_store::NetworkAvailabilityState; -use av_store_helpers::new_av_store; -use bitvec::bitvec; use colored::Colorize; use futures::{channel::oneshot, stream::FuturesUnordered, StreamExt}; -use itertools::Itertools; use parity_scale_codec::Encode; use polkadot_availability_bitfield_distribution::BitfieldDistribution; use polkadot_availability_distribution::{ @@ -43,37 +39,27 @@ use polkadot_availability_recovery::AvailabilityRecoverySubsystem; use polkadot_node_core_av_store::AvailabilityStoreSubsystem; use polkadot_node_metrics::metrics::Metrics; use polkadot_node_network_protocol::{ - request_response::{v1::ChunkFetchingRequest, IncomingRequest, ReqProtocolNames}, - OurView, Versioned, VersionedValidationProtocol, + request_response::{IncomingRequest, ReqProtocolNames}, + OurView, }; -use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV}; use polkadot_node_subsystem::{ messages::{AllMessages, AvailabilityRecoveryMessage}, Overseer, OverseerConnector, SpawnGlue, }; -use polkadot_node_subsystem_test_helpers::{ - derive_erasure_chunks_with_proofs_and_root, mock::new_block_import_info, -}; use polkadot_node_subsystem_types::{ messages::{AvailabilityStoreMessage, NetworkBridgeEvent}, Span, }; use polkadot_overseer::{metrics::Metrics as OverseerMetrics, Handle as OverseerHandle}; -use polkadot_primitives::{ - AvailabilityBitfield, BlockNumber, CandidateHash, CandidateReceipt, GroupIndex, Hash, HeadData, - Header, PersistedValidationData, Signed, SigningContext,
ValidatorIndex, -}; -use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; -use sc_network::{ - request_responses::{IncomingRequest as RawIncomingRequest, ProtocolConfig}, - PeerId, -}; +use polkadot_primitives::GroupIndex; +use sc_network::request_responses::{IncomingRequest as RawIncomingRequest, ProtocolConfig}; use sc_service::SpawnTaskHandle; use serde::{Deserialize, Serialize}; -use sp_core::H256; -use std::{collections::HashMap, iter::Cycle, ops::Sub, sync::Arc, time::Instant}; +use std::{ops::Sub, sync::Arc, time::Instant}; +pub use test_state::TestState; mod av_store_helpers; +mod test_state; const LOG_TARGET: &str = "subsystem-bench::availability"; @@ -83,7 +69,7 @@ const LOG_TARGET: &str = "subsystem-bench::availability"; pub struct DataAvailabilityReadOptions { #[clap(short, long, default_value_t = false)] /// Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU as - /// we don't need to re-construct from chunks. Tipically this is only faster if nodes have + /// we don't need to re-construct from chunks. Typically this is only faster if nodes have /// enough bandwidth. pub fetch_from_backers: bool, } @@ -149,94 +135,48 @@ fn build_overseer_for_availability_write( (overseer, OverseerHandle::new(raw_handle)) } -/// Takes a test configuration and uses it to create the `TestEnvironment`. pub fn prepare_test( - config: TestConfiguration, - state: &mut TestState, - mode: TestDataAvailability, - with_prometheus_endpoint: bool, -) -> (TestEnvironment, Vec) { - prepare_test_inner( - config, - state, - mode, - TestEnvironmentDependencies::default(), - with_prometheus_endpoint, - ) -} - -fn prepare_test_inner( - config: TestConfiguration, - state: &mut TestState, + state: &TestState, mode: TestDataAvailability, - dependencies: TestEnvironmentDependencies, with_prometheus_endpoint: bool, ) -> (TestEnvironment, Vec) { - // Generate test authorities. - let test_authorities = config.generate_authorities(); - - let mut candidate_hashes: HashMap> = HashMap::new(); - - // Prepare per block candidates. - // Genesis block is always finalized, so we start at 1. - for block_num in 1..=config.num_blocks { - for _ in 0..config.n_cores { - candidate_hashes - .entry(Hash::repeat_byte(block_num as u8)) - .or_default() - .push(state.next_candidate().expect("Cycle iterator")) - } - - // First candidate is our backed candidate. 
- state.backed_candidates.push( - candidate_hashes - .get(&Hash::repeat_byte(block_num as u8)) - .expect("just inserted above") - .first() - .expect("just inserted above") - .clone(), - ); - } - - let runtime_api = runtime_api::MockRuntimeApi::new( - config.clone(), - test_authorities.clone(), - candidate_hashes, - Default::default(), - Default::default(), - 0, - ); - - let availability_state = NetworkAvailabilityState { - candidate_hashes: state.candidate_hashes.clone(), - available_data: state.available_data.clone(), - chunks: state.chunks.clone(), - }; - - let mut req_cfgs = Vec::new(); - let (collation_req_receiver, collation_req_cfg) = IncomingRequest::get_config_receiver(&ReqProtocolNames::new(GENESIS_HASH, None)); - req_cfgs.push(collation_req_cfg); - let (pov_req_receiver, pov_req_cfg) = IncomingRequest::get_config_receiver(&ReqProtocolNames::new(GENESIS_HASH, None)); - let (chunk_req_receiver, chunk_req_cfg) = IncomingRequest::get_config_receiver(&ReqProtocolNames::new(GENESIS_HASH, None)); - req_cfgs.push(pov_req_cfg); + let req_cfgs = vec![collation_req_cfg, pov_req_cfg]; - let (network, network_interface, network_receiver) = - new_network(&config, &dependencies, &test_authorities, vec![Arc::new(availability_state)]); + let dependencies = TestEnvironmentDependencies::default(); + let availability_state = NetworkAvailabilityState { + candidate_hashes: state.candidate_hashes.clone(), + available_data: state.available_data.clone(), + chunks: state.chunks.clone(), + }; + let (network, network_interface, network_receiver) = new_network( + &state.config, + &dependencies, + &state.test_authorities, + vec![Arc::new(availability_state.clone())], + ); let network_bridge_tx = network_bridge::MockNetworkBridgeTx::new( network.clone(), network_interface.subsystem_sender(), - test_authorities.clone(), + state.test_authorities.clone(), ); - let network_bridge_rx = - network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_cfg.clone())); + network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_cfg)); + + let runtime_api = runtime_api::MockRuntimeApi::new( + state.config.clone(), + state.test_authorities.clone(), + state.candidate_receipts.clone(), + Default::default(), + Default::default(), + 0, + ); let (overseer, overseer_handle) = match &mode { TestDataAvailability::Read(options) => { @@ -271,27 +211,12 @@ fn prepare_test_inner( }, TestDataAvailability::Write => { let availability_distribution = AvailabilityDistributionSubsystem::new( - test_authorities.keyring.keystore(), + state.test_authorities.keyring.keystore(), IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver }, Metrics::try_register(&dependencies.registry).unwrap(), ); - let block_headers = (1..=config.num_blocks) - .map(|block_number| { - ( - Hash::repeat_byte(block_number as u8), - Header { - digest: Default::default(), - number: block_number as BlockNumber, - parent_hash: Default::default(), - extrinsics_root: Default::default(), - state_root: Default::default(), - }, - ) - }) - .collect::>(); - - let chain_api_state = ChainApiState { block_headers }; + let chain_api_state = ChainApiState { block_headers: state.block_headers.clone() }; let chain_api = MockChainApi::new(chain_api_state); let bitfield_distribution = BitfieldDistribution::new(Metrics::try_register(&dependencies.registry).unwrap()); @@ -311,167 +236,42 @@ fn prepare_test_inner( ( TestEnvironment::new( dependencies, - config, + state.config.clone(), network, overseer, overseer_handle, - test_authorities, + 
state.test_authorities.clone(), with_prometheus_endpoint, ), req_cfgs, ) } -#[derive(Clone)] -pub struct TestState { - // Full test configuration - config: TestConfiguration, - // A cycle iterator on all PoV sizes used in the test. - pov_sizes: Cycle>, - // Generated candidate receipts to be used in the test - candidates: Cycle>, - // Map from pov size to candidate index - pov_size_to_candidate: HashMap, - // Map from generated candidate hashes to candidate index in `available_data` - // and `chunks`. - candidate_hashes: HashMap, - // Per candidate index receipts. - candidate_receipt_templates: Vec, - // Per candidate index `AvailableData` - available_data: Vec, - // Per candiadte index chunks - chunks: Vec>, - // Per relay chain block - candidate backed by our backing group - backed_candidates: Vec, -} - -impl TestState { - pub fn next_candidate(&mut self) -> Option { - let candidate = self.candidates.next(); - let candidate_hash = candidate.as_ref().unwrap().hash(); - gum::trace!(target: LOG_TARGET, "Next candidate selected {:?}", candidate_hash); - candidate - } - - /// Generate candidates to be used in the test. - fn generate_candidates(&mut self) { - let count = self.config.n_cores * self.config.num_blocks; - gum::info!(target: LOG_TARGET,"{}", format!("Pre-generating {} candidates.", count).bright_blue()); - - // Generate all candidates - self.candidates = (0..count) - .map(|index| { - let pov_size = self.pov_sizes.next().expect("This is a cycle; qed"); - let candidate_index = *self - .pov_size_to_candidate - .get(&pov_size) - .expect("pov_size always exists; qed"); - let mut candidate_receipt = - self.candidate_receipt_templates[candidate_index].clone(); - - // Make it unique. - candidate_receipt.descriptor.relay_parent = Hash::from_low_u64_be(index as u64); - // Store the new candidate in the state - self.candidate_hashes.insert(candidate_receipt.hash(), candidate_index); - - gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_receipt.hash(), "new candidate"); - - candidate_receipt - }) - .collect::>() - .into_iter() - .cycle(); - } - - pub fn new(config: &TestConfiguration) -> Self { - let config = config.clone(); - - let mut chunks = Vec::new(); - let mut available_data = Vec::new(); - let mut candidate_receipt_templates = Vec::new(); - let mut pov_size_to_candidate = HashMap::new(); - - // we use it for all candidates. - let persisted_validation_data = PersistedValidationData { - parent_head: HeadData(vec![7, 8, 9]), - relay_parent_number: Default::default(), - max_pov_size: 1024, - relay_parent_storage_root: Default::default(), - }; - - // For each unique pov we create a candidate receipt. 
- for (index, pov_size) in config.pov_sizes().iter().cloned().unique().enumerate() { - gum::info!(target: LOG_TARGET, index, pov_size, "{}", "Generating template candidate".bright_blue()); - - let mut candidate_receipt = dummy_candidate_receipt(dummy_hash()); - let pov = PoV { block_data: BlockData(vec![index as u8; pov_size]) }; - - let new_available_data = AvailableData { - validation_data: persisted_validation_data.clone(), - pov: Arc::new(pov), - }; - - let (new_chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( - config.n_validators, - &new_available_data, - |_, _| {}, - ); - - candidate_receipt.descriptor.erasure_root = erasure_root; - - chunks.push(new_chunks); - available_data.push(new_available_data); - pov_size_to_candidate.insert(pov_size, index); - candidate_receipt_templates.push(candidate_receipt); - } - - gum::info!(target: LOG_TARGET, "{}","Created test environment.".bright_blue()); - - let mut _self = Self { - available_data, - candidate_receipt_templates, - chunks, - pov_size_to_candidate, - pov_sizes: Vec::from(config.pov_sizes()).into_iter().cycle(), - candidate_hashes: HashMap::new(), - candidates: Vec::new().into_iter().cycle(), - backed_candidates: Vec::new(), - config, - }; - - _self.generate_candidates(); - _self - } - - pub fn backed_candidates(&mut self) -> &mut Vec { - &mut self.backed_candidates - } -} - pub async fn benchmark_availability_read( benchmark_name: &str, env: &mut TestEnvironment, - mut state: TestState, + state: &TestState, ) -> BenchmarkUsage { let config = env.config().clone(); - env.import_block(new_block_import_info(Hash::repeat_byte(1), 1)).await; - - let test_start = Instant::now(); - let mut batch = FuturesUnordered::new(); - let mut availability_bytes = 0u128; - env.metrics().set_n_validators(config.n_validators); env.metrics().set_n_cores(config.n_cores); - for block_num in 1..=env.config().num_blocks { + let mut batch = FuturesUnordered::new(); + let mut availability_bytes = 0u128; + let mut candidates = state.candidates.clone(); + let test_start = Instant::now(); + for block_info in state.block_infos.iter() { + let block_num = block_info.number as usize; gum::info!(target: LOG_TARGET, "Current block {}/{}", block_num, env.config().num_blocks); env.metrics().set_current_block(block_num); let block_start_ts = Instant::now(); + env.import_block(block_info.clone()).await; + for candidate_num in 0..config.n_cores as u64 { let candidate = - state.next_candidate().expect("We always send up to n_cores*num_blocks; qed"); + candidates.next().expect("We always send up to n_cores*num_blocks; qed"); let (tx, rx) = oneshot::channel(); batch.push(rx); @@ -519,7 +319,7 @@ pub async fn benchmark_availability_read( pub async fn benchmark_availability_write( benchmark_name: &str, env: &mut TestEnvironment, - mut state: TestState, + state: &TestState, ) -> BenchmarkUsage { let config = env.config().clone(); @@ -527,7 +327,7 @@ pub async fn benchmark_availability_write( env.metrics().set_n_cores(config.n_cores); gum::info!(target: LOG_TARGET, "Seeding availability store with candidates ..."); - for backed_candidate in state.backed_candidates().clone() { + for backed_candidate in state.backed_candidates.clone() { let candidate_index = *state.candidate_hashes.get(&backed_candidate.hash()).unwrap(); let available_data = state.available_data[candidate_index].clone(); let (tx, rx) = oneshot::channel(); @@ -550,15 +350,14 @@ pub async fn benchmark_availability_write( gum::info!(target: LOG_TARGET, "Done"); let test_start = Instant::now(); - - for 
block_num in 1..=env.config().num_blocks { + for block_info in state.block_infos.iter() { + let block_num = block_info.number as usize; gum::info!(target: LOG_TARGET, "Current block #{}", block_num); env.metrics().set_current_block(block_num); let block_start_ts = Instant::now(); - let relay_block_hash = Hash::repeat_byte(block_num as u8); - env.import_block(new_block_import_info(relay_block_hash, block_num as BlockNumber)) - .await; + let relay_block_hash = block_info.hash; + env.import_block(block_info.clone()).await; // Inform bitfield distribution about our view of current test block let message = polkadot_node_subsystem_types::messages::BitfieldDistributionMessage::NetworkBridgeUpdate( @@ -569,20 +368,13 @@ pub async fn benchmark_availability_write( let chunk_fetch_start_ts = Instant::now(); // Request chunks of our own backed candidate from all other validators. - let mut receivers = Vec::new(); - for index in 1..config.n_validators { + let payloads = state.chunk_fetching_requests.get(block_num - 1).expect("pregenerated"); + let receivers = (1..config.n_validators).filter_map(|index| { let (pending_response, pending_response_receiver) = oneshot::channel(); - let request = RawIncomingRequest { - peer: PeerId::random(), - payload: ChunkFetchingRequest { - candidate_hash: state.backed_candidates()[block_num - 1].hash(), - index: ValidatorIndex(index as u32), - } - .encode(), - pending_response, - }; - + let peer_id = *env.authorities().peer_ids.get(index).expect("all validators have ids"); + let payload = payloads.get(index).expect("pregenerated").clone(); + let request = RawIncomingRequest { peer: peer_id, payload, pending_response }; let peer = env .authorities() .validator_authority_id @@ -592,59 +384,39 @@ pub async fn benchmark_availability_write( if env.network().is_peer_connected(peer) && env.network().send_request_from_peer(peer, request).is_ok() { - receivers.push(pending_response_receiver); + Some(pending_response_receiver) + } else { + None } - } + }); gum::info!(target: LOG_TARGET, "Waiting for all emulated peers to receive their chunk from us ..."); - for receiver in receivers.into_iter() { - let response = receiver.await.expect("Chunk is always served successfully"); - // TODO: check if chunk is the one the peer expects to receive. - assert!(response.result.is_ok()); - } - let chunk_fetch_duration = Instant::now().sub(chunk_fetch_start_ts).as_millis(); + let responses = futures::future::try_join_all(receivers) + .await + .expect("Chunk is always served successfully"); + // TODO: check if chunk is the one the peer expects to receive. + assert!(responses.iter().all(|v| v.result.is_ok())); + let chunk_fetch_duration = Instant::now().sub(chunk_fetch_start_ts).as_millis(); gum::info!(target: LOG_TARGET, "All chunks received in {}ms", chunk_fetch_duration); - let signing_context = SigningContext { session_index: 0, parent_hash: relay_block_hash }; let network = env.network().clone(); let authorities = env.authorities().clone(); - let n_validators = config.n_validators; - // Spawn a task that will generate `n_validator` - 1 signed bitfiends and + // Spawn a task that will generate `n_validator` - 1 signed bitfields and // send them from the emulated peers to the subsystem. // TODO: Implement topology. - env.spawn_blocking("send-bitfields", async move { - for index in 1..n_validators { - let validator_public = - authorities.validator_public.get(index).expect("All validator keys are known"); - - // Node has all the chunks in the world. 
- let payload: AvailabilityBitfield = - AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); - // TODO(soon): Use pre-signed messages. This is quite intensive on the CPU. - let signed_bitfield = Signed::::sign( - &authorities.keyring.keystore(), - payload, - &signing_context, - ValidatorIndex(index as u32), - validator_public, - ) - .ok() - .flatten() - .expect("should be signed"); - - let from_peer = &authorities.validator_authority_id[index]; - - let message = peer_bitfield_message_v2(relay_block_hash, signed_bitfield); + let messages = state.signed_bitfields.get(&relay_block_hash).expect("pregenerated").clone(); + for index in 1..config.n_validators { + let from_peer = &authorities.validator_authority_id[index]; + let message = messages.get(index).expect("pregenerated").clone(); - // Send the action from peer only if it is connected to our node. - if network.is_peer_connected(from_peer) { - let _ = network.send_message_from_peer(from_peer, message); - } + // Send the action from peer only if it is connected to our node. + if network.is_peer_connected(from_peer) { + let _ = network.send_message_from_peer(from_peer, message); } - }); + } gum::info!( "Waiting for {} bitfields to be received and processed", @@ -653,7 +425,7 @@ pub async fn benchmark_availability_write( // Wait for all bitfields to be processed. env.wait_until_metric( - "polkadot_parachain_received_availabilty_bitfields_total", + "polkadot_parachain_received_availability_bitfields_total", None, |value| value == (config.connected_count() * block_num) as f64, ) @@ -679,17 +451,3 @@ pub async fn benchmark_availability_write( &["availability-distribution", "bitfield-distribution", "availability-store"], ) } - -pub fn peer_bitfield_message_v2( - relay_hash: H256, - signed_bitfield: Signed, -) -> VersionedValidationProtocol { - let bitfield = polkadot_node_network_protocol::v2::BitfieldDistributionMessage::Bitfield( - relay_hash, - signed_bitfield.into(), - ); - - Versioned::V2(polkadot_node_network_protocol::v2::ValidationProtocol::BitfieldDistribution( - bitfield, - )) -} diff --git a/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs b/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs new file mode 100644 index 000000000000..c328ffedf916 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs @@ -0,0 +1,268 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use crate::configuration::{TestAuthorities, TestConfiguration}; +use bitvec::bitvec; +use colored::Colorize; +use itertools::Itertools; +use parity_scale_codec::Encode; +use polkadot_node_network_protocol::{ + request_response::v1::ChunkFetchingRequest, Versioned, VersionedValidationProtocol, +}; +use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV}; +use polkadot_node_subsystem_test_helpers::{ + derive_erasure_chunks_with_proofs_and_root, mock::new_block_import_info, +}; +use polkadot_overseer::BlockInfo; +use polkadot_primitives::{ + AvailabilityBitfield, BlockNumber, CandidateHash, CandidateReceipt, Hash, HeadData, Header, + PersistedValidationData, Signed, SigningContext, ValidatorIndex, +}; +use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; +use sp_core::H256; +use std::{collections::HashMap, iter::Cycle, sync::Arc}; + +const LOG_TARGET: &str = "subsystem-bench::availability::test_state"; + +#[derive(Clone)] +pub struct TestState { + // Full test configuration + pub config: TestConfiguration, + // A cycle iterator on all PoV sizes used in the test. + pub pov_sizes: Cycle<std::vec::IntoIter<usize>>, + // Generated candidate receipts to be used in the test + pub candidates: Cycle<std::vec::IntoIter<CandidateReceipt>>, + // Map from pov size to candidate index + pub pov_size_to_candidate: HashMap<usize, usize>, + // Map from generated candidate hashes to candidate index in `available_data` and `chunks`. + pub candidate_hashes: HashMap<CandidateHash, usize>, + // Per candidate index receipts. + pub candidate_receipt_templates: Vec<CandidateReceipt>, + // Per candidate index `AvailableData` + pub available_data: Vec<AvailableData>, + // Per candidate index chunks + pub chunks: Vec<Vec<ErasureChunk>>, + // Per relay chain block - candidate backed by our backing group + pub backed_candidates: Vec<CandidateReceipt>, + // Relay chain block infos + pub block_infos: Vec<BlockInfo>, + // Chunk fetching requests for backed candidates + pub chunk_fetching_requests: Vec<Vec<Vec<u8>>>, + // Pregenerated signed availability bitfields + pub signed_bitfields: HashMap<H256, Vec<VersionedValidationProtocol>>, + // Relay chain block headers + pub block_headers: HashMap<H256, Header>, + // Authority keys for the network emulation. + pub test_authorities: TestAuthorities, + // Map from relay chain block hash to generated candidate receipts + pub candidate_receipts: HashMap<H256, Vec<CandidateReceipt>>, +} + +impl TestState { + pub fn new(config: &TestConfiguration) -> Self { + let mut test_state = Self { + available_data: Default::default(), + candidate_receipt_templates: Default::default(), + chunks: Default::default(), + pov_size_to_candidate: Default::default(), + pov_sizes: Vec::from(config.pov_sizes()).into_iter().cycle(), + candidate_hashes: HashMap::new(), + candidates: Vec::new().into_iter().cycle(), + backed_candidates: Vec::new(), + config: config.clone(), + block_infos: Default::default(), + chunk_fetching_requests: Default::default(), + signed_bitfields: Default::default(), + candidate_receipts: Default::default(), + block_headers: Default::default(), + test_authorities: config.generate_authorities(), + }; + + // The same persisted validation data is used for all candidates. + let persisted_validation_data = PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + relay_parent_number: Default::default(), + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + }; + + // For each unique pov we create a candidate receipt.
+ for (index, pov_size) in config.pov_sizes().iter().cloned().unique().enumerate() { + gum::info!(target: LOG_TARGET, index, pov_size, "{}", "Generating template candidate".bright_blue()); + + let mut candidate_receipt = dummy_candidate_receipt(dummy_hash()); + let pov = PoV { block_data: BlockData(vec![index as u8; pov_size]) }; + + let new_available_data = AvailableData { + validation_data: persisted_validation_data.clone(), + pov: Arc::new(pov), + }; + + let (new_chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( + config.n_validators, + &new_available_data, + |_, _| {}, + ); + + candidate_receipt.descriptor.erasure_root = erasure_root; + + test_state.chunks.push(new_chunks); + test_state.available_data.push(new_available_data); + test_state.pov_size_to_candidate.insert(pov_size, index); + test_state.candidate_receipt_templates.push(candidate_receipt); + } + + test_state.block_infos = (1..=config.num_blocks) + .map(|block_num| { + let relay_block_hash = Hash::repeat_byte(block_num as u8); + new_block_import_info(relay_block_hash, block_num as BlockNumber) + }) + .collect(); + + test_state.block_headers = test_state + .block_infos + .iter() + .map(|info| { + ( + info.hash, + Header { + digest: Default::default(), + number: info.number, + parent_hash: info.parent_hash, + extrinsics_root: Default::default(), + state_root: Default::default(), + }, + ) + }) + .collect::>(); + + // Generate all candidates + let candidates_count = config.n_cores * config.num_blocks; + gum::info!(target: LOG_TARGET,"{}", format!("Pre-generating {} candidates.", candidates_count).bright_blue()); + test_state.candidates = (0..candidates_count) + .map(|index| { + let pov_size = test_state.pov_sizes.next().expect("This is a cycle; qed"); + let candidate_index = *test_state + .pov_size_to_candidate + .get(&pov_size) + .expect("pov_size always exists; qed"); + let mut candidate_receipt = + test_state.candidate_receipt_templates[candidate_index].clone(); + + // Make it unique. + candidate_receipt.descriptor.relay_parent = Hash::from_low_u64_be(index as u64); + // Store the new candidate in the state + test_state.candidate_hashes.insert(candidate_receipt.hash(), candidate_index); + + gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_receipt.hash(), "new candidate"); + + candidate_receipt + }) + .collect::>() + .into_iter() + .cycle(); + + // Prepare per block candidates. + // Genesis block is always finalized, so we start at 1. + for info in test_state.block_infos.iter() { + for _ in 0..config.n_cores { + let receipt = test_state.candidates.next().expect("Cycle iterator"); + test_state.candidate_receipts.entry(info.hash).or_default().push(receipt); + } + + // First candidate is our backed candidate. 
+ test_state.backed_candidates.push( + test_state + .candidate_receipts + .get(&info.hash) + .expect("just inserted above") + .first() + .expect("just inserted above") + .clone(), + ); + } + + test_state.chunk_fetching_requests = test_state + .backed_candidates + .iter() + .map(|candidate| { + (0..config.n_validators) + .map(|index| { + ChunkFetchingRequest { + candidate_hash: candidate.hash(), + index: ValidatorIndex(index as u32), + } + .encode() + }) + .collect::>() + }) + .collect::>(); + + test_state.signed_bitfields = test_state + .block_infos + .iter() + .map(|block_info| { + let signing_context = + SigningContext { session_index: 0, parent_hash: block_info.hash }; + let messages = (0..config.n_validators) + .map(|index| { + let validator_public = test_state + .test_authorities + .validator_public + .get(index) + .expect("All validator keys are known"); + + // Node has all the chunks in the world. + let payload: AvailabilityBitfield = + AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); + let signed_bitfield = Signed::::sign( + &test_state.test_authorities.keyring.keystore(), + payload, + &signing_context, + ValidatorIndex(index as u32), + validator_public, + ) + .ok() + .flatten() + .expect("should be signed"); + + peer_bitfield_message_v2(block_info.hash, signed_bitfield) + }) + .collect::>(); + + (block_info.hash, messages) + }) + .collect(); + + gum::info!(target: LOG_TARGET, "{}","Created test environment.".bright_blue()); + + test_state + } +} + +fn peer_bitfield_message_v2( + relay_hash: H256, + signed_bitfield: Signed, +) -> VersionedValidationProtocol { + let bitfield = polkadot_node_network_protocol::v2::BitfieldDistributionMessage::Bitfield( + relay_hash, + signed_bitfield.into(), + ); + + Versioned::V2(polkadot_node_network_protocol::v2::ValidationProtocol::BitfieldDistribution( + bitfield, + )) +} diff --git a/polkadot/node/subsystem-bench/src/lib/configuration.rs b/polkadot/node/subsystem-bench/src/lib/configuration.rs index 17c81c4fd355..5725a5137ec4 100644 --- a/polkadot/node/subsystem-bench/src/lib/configuration.rs +++ b/polkadot/node/subsystem-bench/src/lib/configuration.rs @@ -122,10 +122,10 @@ pub struct TestConfiguration { /// Randomly sampled pov_sizes #[serde(skip)] pub pov_sizes: Vec, - /// The amount of bandiwdth remote validators have. + /// The amount of bandwidth remote validators have. #[serde(default = "default_bandwidth")] pub peer_bandwidth: usize, - /// The amount of bandiwdth our node has. + /// The amount of bandwidth our node has. 
#[serde(default = "default_bandwidth")] pub bandwidth: usize, /// Optional peer emulation latency (round trip time) wrt node under test @@ -205,7 +205,7 @@ impl TestConfiguration { let peer_id_to_authority = peer_ids .iter() .zip(validator_authority_id.iter()) - .map(|(peer_id, authorithy_id)| (*peer_id, authorithy_id.clone())) + .map(|(peer_id, authority_id)| (*peer_id, authority_id.clone())) .collect(); TestAuthorities { diff --git a/polkadot/node/subsystem-bench/src/lib/environment.rs b/polkadot/node/subsystem-bench/src/lib/environment.rs index 958ed50d0894..42955d030223 100644 --- a/polkadot/node/subsystem-bench/src/lib/environment.rs +++ b/polkadot/node/subsystem-bench/src/lib/environment.rs @@ -118,6 +118,7 @@ fn new_runtime() -> tokio::runtime::Runtime { .thread_name("subsystem-bench") .enable_all() .thread_stack_size(3 * 1024 * 1024) + .worker_threads(4) .build() .unwrap() } @@ -403,7 +404,7 @@ impl TestEnvironment { let total_cpu = test_env_cpu_metrics.sum_by("substrate_tasks_polling_duration_sum"); usage.push(ResourceUsage { - resource_name: "Test environment".to_string(), + resource_name: "test-environment".to_string(), total: total_cpu, per_block: total_cpu / num_blocks, }); diff --git a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs index 9064f17940f0..fba33523be85 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs @@ -39,8 +39,9 @@ pub struct AvailabilityStoreState { const LOG_TARGET: &str = "subsystem-bench::av-store-mock"; -/// Mockup helper. Contains Ccunks and full availability data of all parachain blocks +/// Mockup helper. Contains Chunks and full availability data of all parachain blocks /// used in a test. +#[derive(Clone)] pub struct NetworkAvailabilityState { pub candidate_hashes: HashMap, pub available_data: Vec, diff --git a/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs index bee15c3cefdf..86b030fb6fdc 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs @@ -89,7 +89,7 @@ impl MockChainApi { let hash = self .state .get_header_by_number(requested_number) - .expect("Unknow block number") + .expect("Unknown block number") .hash(); sender.send(Ok(Some(hash))).unwrap(); }, diff --git a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs index d598f6447d3d..ec66ad4e279c 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs @@ -42,8 +42,8 @@ pub struct MockNetworkBridgeTx { network: NetworkEmulatorHandle, /// A channel to the network interface, to_network_interface: UnboundedSender, - /// Test authorithies - test_authorithies: TestAuthorities, + /// Test authorities + test_authorities: TestAuthorities, } /// A mock of the network bridge tx subsystem. 
@@ -58,9 +58,9 @@ impl MockNetworkBridgeTx { pub fn new( network: NetworkEmulatorHandle, to_network_interface: UnboundedSender, - test_authorithies: TestAuthorities, + test_authorities: TestAuthorities, ) -> MockNetworkBridgeTx { - Self { network, to_network_interface, test_authorithies } + Self { network, to_network_interface, test_authorities } } } @@ -125,13 +125,13 @@ impl MockNetworkBridgeTx { } }, NetworkBridgeTxMessage::ReportPeer(_) => { - // ingore rep changes + // ignore rep changes }, NetworkBridgeTxMessage::SendValidationMessage(peers, message) => { for peer in peers { self.to_network_interface .unbounded_send(NetworkMessage::MessageFromNode( - self.test_authorithies + self.test_authorities .peer_id_to_authority .get(&peer) .unwrap() diff --git a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs index 53faf562f03c..b73d61321cd3 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs @@ -26,7 +26,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::OverseerSignal; use polkadot_primitives::{ - vstaging::NodeFeatures, CandidateEvent, CandidateReceipt, CoreState, GroupIndex, IndexedVec, + CandidateEvent, CandidateReceipt, CoreState, GroupIndex, IndexedVec, NodeFeatures, OccupiedCore, SessionIndex, SessionInfo, ValidatorIndex, }; use sp_consensus_babe::Epoch as BabeEpoch; @@ -36,6 +36,7 @@ use std::collections::HashMap; const LOG_TARGET: &str = "subsystem-bench::runtime-api-mock"; /// Minimal state to answer requests. +#[derive(Clone)] pub struct RuntimeApiState { // All authorities in the test, authorities: TestAuthorities, @@ -49,6 +50,7 @@ pub struct RuntimeApiState { } /// A mocked `runtime-api` subsystem. +#[derive(Clone)] pub struct MockRuntimeApi { state: RuntimeApiState, config: TestConfiguration, diff --git a/polkadot/node/subsystem-bench/src/lib/network.rs b/polkadot/node/subsystem-bench/src/lib/network.rs index 1bc35a014ff5..0f7b7d741e77 100644 --- a/polkadot/node/subsystem-bench/src/lib/network.rs +++ b/polkadot/node/subsystem-bench/src/lib/network.rs @@ -154,7 +154,7 @@ pub enum NetworkMessage { MessageFromNode(AuthorityDiscoveryId, VersionedValidationProtocol), /// A request originating from our node RequestFromNode(AuthorityDiscoveryId, Requests), - /// A request originating from an emultated peer + /// A request originating from an emulated peer RequestFromPeer(IncomingRequest), } @@ -790,9 +790,9 @@ pub fn new_network( let connected_count = config.connected_count(); - let mut peers_indicies = (0..n_peers).collect_vec(); + let mut peers_indices = (0..n_peers).collect_vec(); let (_connected, to_disconnect) = - peers_indicies.partial_shuffle(&mut thread_rng(), connected_count); + peers_indices.partial_shuffle(&mut thread_rng(), connected_count); // Node under test is always mark as disconnected. peers[NODE_UNDER_TEST as usize].disconnect(); @@ -958,7 +958,7 @@ impl Metrics { .inc_by(bytes as u64); } - /// Increment total receioved for a peer. + /// Increment total received for a peer. 
pub fn on_peer_received(&self, peer_index: usize, bytes: usize) { self.peer_total_received .with_label_values(vec![format!("node{}", peer_index).as_str()].as_slice()) @@ -1041,7 +1041,7 @@ mod tests { async fn test_expected_rate() { let tick_rate = 200; let budget = 1_000_000; - // rate must not exceeed 100 credits per second + // rate must not exceed 100 credits per second let mut rate_limiter = RateLimit::new(tick_rate, budget); let mut total_sent = 0usize; let start = Instant::now(); diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs index ef60d67372ae..59296746ec3d 100644 --- a/polkadot/node/subsystem-bench/src/lib/usage.rs +++ b/polkadot/node/subsystem-bench/src/lib/usage.rs @@ -17,6 +17,7 @@ //! Test usage implementation use colored::Colorize; +use itertools::Itertools; use serde::{Deserialize, Serialize}; use std::collections::HashMap; @@ -37,10 +38,16 @@ impl std::fmt::Display for BenchmarkUsage { self.network_usage .iter() .map(|v| v.to_string()) + .sorted() .collect::>() .join("\n"), format!("{:<32}{:>12}{:>12}", "CPU usage, seconds", "total", "per block").blue(), - self.cpu_usage.iter().map(|v| v.to_string()).collect::>().join("\n") + self.cpu_usage + .iter() + .map(|v| v.to_string()) + .sorted() + .collect::>() + .join("\n") ) } } @@ -75,6 +82,27 @@ impl BenchmarkUsage { _ => None, } } + + // Prepares a json string for a graph representation + // See: https://github.com/benchmark-action/github-action-benchmark?tab=readme-ov-file#examples + pub fn to_chart_json(&self) -> color_eyre::eyre::Result { + let chart = self + .network_usage + .iter() + .map(|v| ChartItem { + name: v.resource_name.clone(), + unit: "KiB".to_string(), + value: v.per_block, + }) + .chain(self.cpu_usage.iter().map(|v| ChartItem { + name: v.resource_name.clone(), + unit: "seconds".to_string(), + value: v.per_block, + })) + .collect::>(); + + Ok(serde_json::to_string(&chart)?) + } } fn check_usage( @@ -101,8 +129,8 @@ fn check_resource_usage( None } else { Some(format!( - "The resource `{}` is expected to be equal to {} with a precision {}, but the current value is {}", - resource_name, base, precision, usage.per_block + "The resource `{}` is expected to be equal to {} with a precision {}, but the current value is {} ({})", + resource_name, base, precision, usage.per_block, diff )) } } else { @@ -119,7 +147,7 @@ pub struct ResourceUsage { impl std::fmt::Display for ResourceUsage { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:<32}{:>12.3}{:>12.3}", self.resource_name.cyan(), self.total, self.per_block) + write!(f, "{:<32}{:>12.4}{:>12.4}", self.resource_name.cyan(), self.total, self.per_block) } } @@ -144,3 +172,10 @@ impl ResourceUsage { } type ResourceUsageCheck<'a> = (&'a str, f64, f64); + +#[derive(Debug, Serialize)] +pub struct ChartItem { + pub name: String, + pub unit: String, + pub value: f64, +} diff --git a/polkadot/node/subsystem-bench/src/lib/utils.rs b/polkadot/node/subsystem-bench/src/lib/utils.rs index 75b72cc11b98..b3cd3a88b6c1 100644 --- a/polkadot/node/subsystem-bench/src/lib/utils.rs +++ b/polkadot/node/subsystem-bench/src/lib/utils.rs @@ -16,61 +16,26 @@ //! Test utils -use crate::usage::BenchmarkUsage; -use std::io::{stdout, Write}; - -pub struct WarmUpOptions<'a> { - /// The maximum number of runs considered for marming up. - pub warm_up: usize, - /// The number of runs considered for benchmarking. 
- pub bench: usize, - /// The difference in CPU usage between runs considered as normal - pub precision: f64, - /// The subsystems whose CPU usage is checked during warm-up cycles - pub subsystems: &'a [&'a str], -} - -impl<'a> WarmUpOptions<'a> { - pub fn new(subsystems: &'a [&'a str]) -> Self { - Self { warm_up: 100, bench: 3, precision: 0.02, subsystems } - } -} - -pub fn warm_up_and_benchmark( - options: WarmUpOptions, - run: impl Fn() -> BenchmarkUsage, -) -> Result { - println!("Warming up..."); - let mut usages = Vec::with_capacity(options.bench); - - for n in 1..=options.warm_up { - let curr = run(); - if let Some(prev) = usages.last() { - let diffs = options - .subsystems - .iter() - .map(|&v| { - curr.cpu_usage_diff(prev, v) - .ok_or(format!("{} not found in benchmark {:?}", v, prev)) - }) - .collect::, String>>()?; - if !diffs.iter().all(|&v| v < options.precision) { - usages.clear(); - } - } - usages.push(curr); - print!("\r{}%", n * 100 / options.warm_up); - if usages.len() == options.bench { - println!("\rTook {} runs to warm up", n.saturating_sub(options.bench)); - break; - } - stdout().flush().unwrap(); - } - - if usages.len() != options.bench { - println!("Didn't warm up after {} runs", options.warm_up); - return Err("Can't warm up".to_string()) +use std::{fs::File, io::Write}; + +// Saves a given string to a file +pub fn save_to_file(path: &str, value: String) -> color_eyre::eyre::Result<()> { + let output = std::process::Command::new(env!("CARGO")) + .arg("locate-project") + .arg("--workspace") + .arg("--message-format=plain") + .output() + .unwrap() + .stdout; + let workspace_dir = std::path::Path::new(std::str::from_utf8(&output).unwrap().trim()) + .parent() + .unwrap(); + let path = workspace_dir.join(path); + if let Some(dir) = path.parent() { + std::fs::create_dir_all(dir)?; } + let mut file = File::create(path)?; + file.write_all(value.as_bytes())?; - Ok(BenchmarkUsage::average(&usages)) + Ok(()) } diff --git a/polkadot/node/subsystem-test-helpers/Cargo.toml b/polkadot/node/subsystem-test-helpers/Cargo.toml index c71f030568d9..57678e8e8d4a 100644 --- a/polkadot/node/subsystem-test-helpers/Cargo.toml +++ b/polkadot/node/subsystem-test-helpers/Cargo.toml @@ -11,8 +11,8 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.74" -futures = "0.3.21" +async-trait = "0.1.79" +futures = "0.3.30" parking_lot = "0.12.1" polkadot-node-subsystem = { path = "../subsystem" } polkadot-erasure-coding = { path = "../../erasure-coding" } diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index 54c8f7e2ade7..101907763877 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] derive_more = "0.99.17" -futures = "0.3.21" +futures = "0.3.30" polkadot-primitives = { path = "../../primitives" } polkadot-node-primitives = { path = "../primitives" } polkadot-node-network-protocol = { path = "../network/protocol" } @@ -29,5 +29,5 @@ sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/a smallvec = "1.8.0" substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } thiserror = { workspace = true } -async-trait = "0.1.74" +async-trait = "0.1.79" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index eeb684149c80..2ca6728af012 100644 --- 
a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -42,20 +42,18 @@ use polkadot_node_primitives::{ ValidationResult, }; use polkadot_primitives::{ - async_backing, slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, CandidateHash, - CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreState, - DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, HeadData, - Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - PvfExecKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield, - SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, slashing, ApprovalVotingParams, AuthorityDiscoveryId, BackedCandidate, + BlockNumber, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, + CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, + GroupRotationInfo, Hash, HeadData, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, MultiDisputeStatementSet, NodeFeatures, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, PvfExecKind, SessionIndex, SessionInfo, + SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use polkadot_statement_table::v2::Misbehavior; use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet, VecDeque}, sync::Arc, }; @@ -82,8 +80,15 @@ pub struct CanSecondRequest { pub enum CandidateBackingMessage { /// Requests a set of backable candidates attested by the subsystem. /// - /// Each pair is (candidate_hash, candidate_relay_parent). - GetBackedCandidates(Vec<(CandidateHash, Hash)>, oneshot::Sender<Vec<BackedCandidate>>), + /// The order of candidates of the same para must be preserved in the response. + /// If a backed candidate of a para cannot be retrieved, the response should not contain any + /// candidates of the same para that follow it in the input vector. In other words, assuming + /// candidates are supplied in dependency order, we must ensure that this dependency order is + /// preserved. + GetBackedCandidates( + HashMap<ParaId, Vec<(CandidateHash, Hash)>>, + oneshot::Sender<HashMap<ParaId, Vec<BackedCandidate>>>, + ), /// Request the subsystem to check whether it's allowed to second given candidate. /// The rule is to only fetch collations that are either built on top of the root /// of some fragment tree or have a parent node which represents backed candidate. @@ -221,6 +226,8 @@ pub enum CollatorProtocolMessage { /// The result sender should be informed when at least one parachain validator seconded the /// collation. It is also completely okay to just drop the sender. result_sender: Option<oneshot::Sender<CollationSecondedSignal>>, + /// The core index where the candidate should be backed. + core_index: CoreIndex, }, /// Report a collator as having provided an invalid collation. This should lead to disconnect /// and blacklist of the collator.
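The ordering requirement documented on `GetBackedCandidates` is worth illustrating. Below is a minimal, self-contained sketch of a conforming responder, using stand-in types rather than the real `ParaId`/`CandidateHash`/`BackedCandidate` (so this illustrates the invariant and is not the backing subsystem's actual code): per para, candidates are looked up in the dependency order in which they were requested, and the response is truncated at the first candidate that cannot be retrieved.

```rust
use std::collections::HashMap;

// Stand-in types; the real message uses `ParaId`, `(CandidateHash, Hash)`
// and `BackedCandidate` from polkadot-primitives.
type ParaId = u32;
type CandidateId = u64;

#[derive(Clone, Debug, PartialEq)]
struct Backed(CandidateId);

// Answer `GetBackedCandidates` while preserving dependency order: for each
// para, stop at the first candidate that cannot be retrieved, so that no
// descendant of a missing candidate leaks into the response.
fn collect_backed(
    requested: HashMap<ParaId, Vec<CandidateId>>,
    lookup: impl Fn(CandidateId) -> Option<Backed>,
) -> HashMap<ParaId, Vec<Backed>> {
    requested
        .into_iter()
        .map(|(para, ids)| {
            // `map_while` (unlike `filter_map`) truncates at the first `None`.
            let backed: Vec<Backed> = ids.into_iter().map_while(&lookup).collect();
            (para, backed)
        })
        .collect()
}

fn main() {
    // Candidate 2 is unavailable, so candidate 3 must be dropped as well.
    let requested = HashMap::from([(1000u32, vec![1u64, 2, 3])]);
    let response = collect_backed(requested, |id| (id != 2).then_some(Backed(id)));
    assert_eq!(response[&1000], vec![Backed(1)]);
}
```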
@@ -729,6 +736,9 @@ pub enum RuntimeApiRequest { /// Approval voting params /// `V10` ApprovalVotingParams(SessionIndex, RuntimeApiSender<ApprovalVotingParams>), + /// Fetch the `ClaimQueue` from scheduler pallet + /// `V11` + ClaimQueue(RuntimeApiSender<BTreeMap<CoreIndex, VecDeque<ParaId>>>), } impl RuntimeApiRequest { @@ -763,6 +773,9 @@ impl RuntimeApiRequest { /// `approval_voting_params` pub const APPROVAL_VOTING_PARAMS_REQUIREMENT: u32 = 10; + + /// `ClaimQueue` + pub const CLAIM_QUEUE_RUNTIME_REQUIREMENT: u32 = 11; } /// A message to the Runtime API subsystem. @@ -792,7 +805,7 @@ pub enum StatementDistributionMessage { /// This data becomes intrinsics or extrinsics which should be included in a future relay chain /// block. -// It needs to be cloneable because multiple potential block authors can request copies. +// It needs to be clonable because multiple potential block authors can request copies. #[derive(Debug, Clone)] pub enum ProvisionableData { /// This bitfield indicates the availability of various candidate blocks. @@ -1114,7 +1127,7 @@ pub struct ProspectiveValidationDataRequest { } /// The parent head-data hash with optional data itself. -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum ParentHeadData { /// Parent head-data hash. OnlyHash(Hash), @@ -1127,6 +1140,16 @@ pub enum ParentHeadData { }, } +impl ParentHeadData { + /// Return the hash of the parent head-data. + pub fn hash(&self) -> Hash { + match self { + ParentHeadData::OnlyHash(hash) => *hash, + ParentHeadData::WithData { hash, .. } => *hash, + } + } +} + /// Indicates the relay-parents whose fragment tree a candidate /// is present in and the depths of that tree the candidate is present in. pub type FragmentTreeMembership = Vec<(Hash, Vec<usize>)>; diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 4039fc9127da..664d10ed1af5 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -16,13 +16,10 @@ use async_trait::async_trait; use polkadot_primitives::{ - async_backing, - runtime_api::ParachainHost, - slashing, - vstaging::{self, ApprovalVotingParams}, - Block, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Header, Id, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, + async_backing, runtime_api::ParachainHost, slashing, ApprovalVotingParams, Block, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, + CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Header, Id, + InboundDownwardMessage, InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; @@ -33,7 +30,10 @@ use sp_authority_discovery::AuthorityDiscoveryApi; use sp_blockchain::{BlockStatus, Info}; use sp_consensus_babe::{BabeApi, Epoch}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use std::{collections::BTreeMap, sync::Arc}; +use std::{ + collections::{BTreeMap, VecDeque}, + sync::Arc, +}; /// Offers header utilities. /// @@ -320,7 +320,7 @@ pub trait RuntimeApiSubsystemClient { // === v9 === /// Get the node features.
- async fn node_features(&self, at: Hash) -> Result; + async fn node_features(&self, at: Hash) -> Result; // == v10: Approval voting params == /// Approval voting configuration parameters @@ -329,6 +329,10 @@ pub trait RuntimeApiSubsystemClient { at: Hash, session_index: SessionIndex, ) -> Result; + + // == v11: Claim queue == + /// Fetch the `ClaimQueue` from scheduler pallet + async fn claim_queue(&self, at: Hash) -> Result>, ApiError>; } /// Default implementation of [`RuntimeApiSubsystemClient`] using the client. @@ -578,7 +582,7 @@ where self.client.runtime_api().async_backing_params(at) } - async fn node_features(&self, at: Hash) -> Result { + async fn node_features(&self, at: Hash) -> Result { self.client.runtime_api().node_features(at) } @@ -594,6 +598,10 @@ where ) -> Result { self.client.runtime_api().approval_voting_params(at) } + + async fn claim_queue(&self, at: Hash) -> Result>, ApiError> { + self.client.runtime_api().claim_queue(at) + } } impl HeaderBackend for DefaultSubsystemClient diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index a668f8de76a0..79a6a75e4cdf 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -10,8 +10,8 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.74" -futures = "0.3.21" +async-trait = "0.1.79" +futures = "0.3.30" futures-channel = "0.3.23" itertools = "0.10" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -44,8 +44,8 @@ parity-db = { version = "0.4.12" } [dev-dependencies] assert_matches = "1.4.0" -env_logger = "0.9.0" -futures = { version = "0.3.21", features = ["thread-pool"] } +env_logger = "0.11" +futures = { version = "0.3.30", features = ["thread-pool"] } log = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } lazy_static = "1.4.0" diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index c7b91bffb3d7..d38d838fedef 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -78,7 +78,7 @@ /// /// 2. The root fragment is invalid under the new constraints because it has been subsumed by /// the relay-chain. In this case, we can discard the root and split & re-root the fragment -/// tree under its descendents and compare to the new constraints again. This is the +/// tree under its descendants and compare to the new constraints again. This is the /// "prediction came true" case. /// /// 3. 
The root fragment is invalid under the new constraints because a competing parachain diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index f13beb3502fc..83b046f0bf0a 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -30,7 +30,7 @@ use polkadot_node_subsystem::{ messages::{RuntimeApiMessage, RuntimeApiRequest, RuntimeApiSender}, overseer, SubsystemSender, }; -use polkadot_primitives::{slashing, ExecutorParams}; +use polkadot_primitives::{async_backing::BackingState, slashing, CoreIndex, ExecutorParams}; pub use overseer::{ gen::{OrchestraError as OverseerError, Timeout}, @@ -53,7 +53,10 @@ pub use rand; use sp_application_crypto::AppCrypto; use sp_core::ByteArray; use sp_keystore::{Error as KeystoreError, KeystorePtr}; -use std::time::Duration; +use std::{ + collections::{BTreeMap, VecDeque}, + time::Duration, +}; use thiserror::Error; use vstaging::get_disabled_validators_with_fallback; @@ -304,6 +307,8 @@ specialize_requests! { fn request_submit_report_dispute_lost(dp: slashing::DisputeProof, okop: slashing::OpaqueKeyOwnershipProof) -> Option<()>; SubmitReportDisputeLost; fn request_disabled_validators() -> Vec; DisabledValidators; fn request_async_backing_params() -> AsyncBackingParams; AsyncBackingParams; + fn request_claim_queue() -> BTreeMap>; ClaimQueue; + fn request_para_backing_state(para_id: ParaId) -> Option; ParaBackingState; } /// Requests executor parameters from the runtime effective at given relay-parent. First obtains @@ -378,7 +383,7 @@ pub fn signing_key_and_index<'a>( /// Sign the given data with the given validator ID. /// -/// Returns `Ok(None)` if the private key that correponds to that validator ID is not found in the +/// Returns `Ok(None)` if the private key that corresponds to that validator ID is not found in the /// given keystore. Returns an error if the key could not be used for signing. pub fn sign( keystore: &KeystorePtr, diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 481625acb321..714384b32e37 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -30,12 +30,11 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ - slashing, - vstaging::{node_features::FeatureIndex, NodeFeatures}, - AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, EncodeAs, ExecutorParams, - GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore, ScrapedOnChainVotes, - SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, + node_features::FeatureIndex, slashing, AsyncBackingParams, CandidateEvent, CandidateHash, + CoreState, EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, + NodeFeatures, OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, + SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, + ValidatorIndex, LEGACY_MIN_BACKING_VOTES, }; use crate::{ diff --git a/polkadot/node/subsystem-util/src/vstaging.rs b/polkadot/node/subsystem-util/src/vstaging.rs index 3e807eff5387..b166a54f75c4 100644 --- a/polkadot/node/subsystem-util/src/vstaging.rs +++ b/polkadot/node/subsystem-util/src/vstaging.rs @@ -19,14 +19,45 @@ //! 
This module is intended to contain common boilerplate code handling unreleased runtime API //! calls. +use std::collections::{BTreeMap, VecDeque}; + use polkadot_node_subsystem_types::messages::{RuntimeApiMessage, RuntimeApiRequest}; use polkadot_overseer::SubsystemSender; -use polkadot_primitives::{Hash, ValidatorIndex}; +use polkadot_primitives::{CoreIndex, Hash, Id as ParaId, ValidatorIndex}; -use crate::{has_required_runtime, request_disabled_validators, runtime}; +use crate::{has_required_runtime, request_claim_queue, request_disabled_validators, runtime}; const LOG_TARGET: &'static str = "parachain::subsystem-util-vstaging"; +/// A snapshot of the runtime claim queue at an arbitrary relay chain block. +#[derive(Default)] +pub struct ClaimQueueSnapshot(BTreeMap<CoreIndex, VecDeque<ParaId>>); + +impl From<BTreeMap<CoreIndex, VecDeque<ParaId>>> for ClaimQueueSnapshot { + fn from(claim_queue_snapshot: BTreeMap<CoreIndex, VecDeque<ParaId>>) -> Self { + ClaimQueueSnapshot(claim_queue_snapshot) + } +} + +impl ClaimQueueSnapshot { + /// Returns the `ParaId` that has a claim for `core_index` at the specified `depth` in the + /// claim queue. A depth of `0` means the very next block. + pub fn get_claim_for(&self, core_index: CoreIndex, depth: usize) -> Option<ParaId> { + self.0.get(&core_index)?.get(depth).copied() + } + + /// Returns an iterator over all claimed cores and the claiming `ParaId` at the specified + /// `depth` in the claim queue. + pub fn iter_claims_at_depth( + &self, + depth: usize, + ) -> impl Iterator<Item = (CoreIndex, ParaId)> + '_ { + self.0 + .iter() + .filter_map(move |(core_index, paras)| Some((*core_index, *paras.get(depth)?))) + } +} + // TODO: https://github.com/paritytech/polkadot-sdk/issues/1940 /// Returns disabled validators list if the runtime supports it. Otherwise logs a debug message and /// returns an empty vec. @@ -54,3 +85,28 @@ pub async fn get_disabled_validators_with_fallback<Sender: SubsystemSender<RuntimeApiMessage>>( +pub async fn fetch_claim_queue( + sender: &mut impl SubsystemSender<RuntimeApiMessage>, + relay_parent: Hash, +) -> Result<Option<ClaimQueueSnapshot>, runtime::Error> { + if has_required_runtime( + sender, + relay_parent, + RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, + ) + .await + { + let res = request_claim_queue(relay_parent, sender) + .await + .await + .map_err(runtime::Error::RuntimeRequestCanceled)??; + Ok(Some(res.into())) + } else { + gum::trace!(target: LOG_TARGET, "Runtime doesn't support `request_claim_queue`"); + Ok(None) + } +} diff --git a/polkadot/node/test/client/Cargo.toml b/polkadot/node/test/client/Cargo.toml index 36748c3b455b..7db00404eb8e 100644 --- a/polkadot/node/test/client/Cargo.toml +++ b/polkadot/node/test/client/Cargo.toml @@ -38,7 +38,7 @@ frame-benchmarking = { path = "../../../../substrate/frame/benchmarking" } [dev-dependencies] sp-keyring = { path = "../../../../substrate/primitives/keyring" } -futures = "0.3.21" +futures = "0.3.30" [features] runtime-benchmarks = [ diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index e7892abcd87b..48a206f23c66 100644 --- a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -10,13 +10,13 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" hex = "0.4.3" gum = { package = "tracing-gum", path = "../../gum" } rand = "0.8.5" serde_json = { workspace = true, default-features = true } tempfile = "3.2.0" -tokio = "1.24.2" +tokio = "1.37" # Polkadot dependencies polkadot-overseer = { path = "../../overseer" } @@ -63,7 +63,7 @@ substrate-test-client = { path = "../../../../substrate/test-utils/client" } [dev-dependencies] pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false }
substrate-test-utils = { path = "../../../../substrate/test-utils" } -tokio = { version = "1.24.2", features = ["macros"] } +tokio = { version = "1.37", features = ["macros"] } [features] runtime-metrics = ["polkadot-test-runtime/runtime-metrics"] diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index ca904bae28db..eed11e62c21e 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -410,7 +410,7 @@ pub fn construct_extrinsic( UncheckedExtrinsic::new_signed( function.clone(), polkadot_test_runtime::Address::Id(caller.public().into()), - polkadot_primitives::Signature::Sr25519(signature.clone()), + polkadot_primitives::Signature::Sr25519(signature), extra.clone(), ) } diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 035777b073bf..ad2338787260 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -14,7 +14,7 @@ workspace = true # this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing # various unnecessary Substrate-specific endpoints. parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false, features = ["serde"] } sp-core = { path = "../../substrate/primitives/core", default-features = false, features = ["serde"] } sp-weights = { path = "../../substrate/primitives/weights", default-features = false } diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index cb283c271191..5a2b54057414 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -17,14 +17,14 @@ path = "src/main.rs" [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" log = { workspace = true, default-features = true } test-parachain-adder = { path = ".." } polkadot-primitives = { path = "../../../../primitives" } polkadot-cli = { path = "../../../../cli" } -polkadot-service = { path = "../../../../node/service", features = ["rococo-native"] } +polkadot-service = { path = "../../../../node/service", features = ["elastic-scaling-experimental", "rococo-native"] } polkadot-node-primitives = { path = "../../../../node/primitives" } polkadot-node-subsystem = { path = "../../../../node/subsystem" } diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index 238b98a66801..cacf7304f90a 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -17,14 +17,14 @@ path = "src/main.rs" [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" log = { workspace = true, default-features = true } test-parachain-undying = { path = ".." 
} polkadot-primitives = { path = "../../../../primitives" } polkadot-cli = { path = "../../../../cli" } -polkadot-service = { path = "../../../../node/service", features = ["rococo-native"] } +polkadot-service = { path = "../../../../node/service", features = ["elastic-scaling-experimental", "rococo-native"] } polkadot-node-primitives = { path = "../../../../node/primitives" } polkadot-node-subsystem = { path = "../../../../node/subsystem" } diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index 7f26dc27ea36..4f959cf18896 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -13,7 +13,7 @@ workspace = true bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "serde"] } hex-literal = "0.4.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] } log = { workspace = true, default-features = false } serde = { features = ["alloc", "derive"], workspace = true } diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index e7353eced88e..e9a332f4144f 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -22,7 +22,7 @@ extern crate alloc; // `v6` is currently the latest stable version of the runtime API. -pub mod v6; +pub mod v7; // The 'staging' version is special - it contains primitives which are // still in development. Once they are considered stable, they will be @@ -35,33 +35,35 @@ pub mod runtime_api; // Current primitives not requiring versioning are exported here. // Primitives requiring versioning must not be exported and must be referred by an exact version. 
-pub use v6::{ +pub use v7::{ async_backing, byzantine_threshold, check_candidate_backing, collator_signature_payload, - effective_minimum_backing_votes, executor_params, metric_definitions, slashing, + effective_minimum_backing_votes, executor_params, metric_definitions, node_features, slashing, supermajority_threshold, well_known_keys, AbridgedHostConfiguration, AbridgedHrmpChannel, - AccountId, AccountIndex, AccountPublic, ApprovalVote, AssignmentId, AsyncBackingParams, - AuthorityDiscoveryId, AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, - BlockId, BlockNumber, CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, - CandidateIndex, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, - CollatorId, CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, - CoreIndex, CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, - EncodeAs, ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, + AccountId, AccountIndex, AccountPublic, ApprovalVote, ApprovalVoteMultipleCandidates, + ApprovalVotingParams, AssignmentId, AsyncBackingParams, AuthorityDiscoveryId, + AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, + CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex, + CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, + CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, + CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, + ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, Hash, HashT, HeadData, Header, HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec, - InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, - OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, - PersistedValidationData, PvfCheckStatement, PvfExecKind, PvfPrepKind, RuntimeMetricLabel, - RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, RuntimeMetricOp, - RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signature, - Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, SignedStatement, - SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield, + InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, NodeFeatures, + Nonce, OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, + ParathreadEntry, PersistedValidationData, PvfCheckStatement, PvfExecKind, PvfPrepKind, + RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, + RuntimeMetricOp, RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, + SessionInfo, Signature, Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, + SignedStatement, SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, UncheckedSignedStatement, UpgradeGoAhead, UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, ValidityError, ASSIGNMENT_KEY_TYPE_ID, LEGACY_MIN_BACKING_VOTES, LOWEST_PUBLIC_ID, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, 
MAX_POV_SIZE, MIN_CODE_SIZE, - ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER, PARACHAIN_KEY_TYPE_ID, + ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER, + PARACHAIN_KEY_TYPE_ID, }; #[cfg(feature = "std")] -pub use v6::{AssignmentPair, CollatorPair, ValidatorPair}; +pub use v7::{AssignmentPair, CollatorPair, ValidatorPair}; diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index 836cb370eae1..d30a3b63b74f 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -114,10 +114,9 @@ //! separated from the stable primitives. use crate::{ - async_backing, slashing, - vstaging::{self, ApprovalVotingParams}, - AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, + async_backing, slashing, ApprovalVotingParams, AsyncBackingParams, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, + CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, NodeFeatures, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature, }; @@ -125,6 +124,10 @@ use crate::{ use alloc::{collections::btree_map::BTreeMap, vec::Vec}; use polkadot_core_primitives as pcp; use polkadot_parachain_primitives::primitives as ppp; +use sp_std::{ + collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + prelude::*, +}; sp_api::decl_runtime_apis! { /// The API for querying the state of parachains on-chain. @@ -275,11 +278,16 @@ sp_api::decl_runtime_apis! { /// Get node features. /// This is a staging method! Do not use on production runtimes! #[api_version(9)] - fn node_features() -> vstaging::NodeFeatures; + fn node_features() -> NodeFeatures; /***** Added in v10 *****/ /// Approval voting configuration parameters #[api_version(10)] fn approval_voting_params() -> ApprovalVotingParams; + + /***** Added in v11 *****/ + /// Claim queue + #[api_version(11)] + fn claim_queue() -> BTreeMap>; } } diff --git a/polkadot/primitives/src/v6/async_backing.rs b/polkadot/primitives/src/v7/async_backing.rs similarity index 100% rename from polkadot/primitives/src/v6/async_backing.rs rename to polkadot/primitives/src/v7/async_backing.rs diff --git a/polkadot/primitives/src/v6/executor_params.rs b/polkadot/primitives/src/v7/executor_params.rs similarity index 100% rename from polkadot/primitives/src/v6/executor_params.rs rename to polkadot/primitives/src/v7/executor_params.rs diff --git a/polkadot/primitives/src/v6/metrics.rs b/polkadot/primitives/src/v7/metrics.rs similarity index 100% rename from polkadot/primitives/src/v6/metrics.rs rename to polkadot/primitives/src/v7/metrics.rs diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v7/mod.rs similarity index 97% rename from polkadot/primitives/src/v6/mod.rs rename to polkadot/primitives/src/v7/mod.rs index f4031b48035e..a5a84273537f 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v7/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! `V6` Primitives. +//! `V7` Primitives. use alloc::{ vec, @@ -402,6 +402,13 @@ pub const MAX_POV_SIZE: u32 = 5 * 1024 * 1024; /// Can be adjusted in configuration. 
pub const ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE: u32 = 10_000; +/// Maximum for maximum queue size. +/// +/// Setting `on_demand_queue_max_size` to a value higher than this is unsound. This is more of a +/// theoretical limit, set just below what the target type supports, so comparisons are possible +/// even with indices that overflow the underlying type. +pub const ON_DEMAND_MAX_QUEUE_MAX_SIZE: u32 = 1_000_000_000; + /// Backing votes threshold used from the host prior to runtime API version 6 and from the runtime /// prior to v9 configuration migration. pub const LEGACY_MIN_BACKING_VOTES: u32 = 2; @@ -1180,6 +1187,32 @@ impl<'a> ApprovalVoteMultipleCandidates<'a> { } } +/// Approval voting configuration parameters +#[derive( + RuntimeDebug, + Copy, + Clone, + PartialEq, + Encode, + Decode, + TypeInfo, + serde::Serialize, + serde::Deserialize, +)] +pub struct ApprovalVotingParams { + /// The maximum number of candidates `approval-voting` can vote for with + /// a single signature. + /// + /// Setting it to 1 means we send the approval as soon as we have it available. + pub max_approval_coalesce_count: u32, +} + +impl Default for ApprovalVotingParams { + fn default() -> Self { + Self { max_approval_coalesce_count: 1 } + } +} + /// Custom validity errors used in Polkadot while validating transactions. #[repr(u8)] pub enum ValidityError { @@ -1453,7 +1486,7 @@ pub enum ValidDisputeStatementKind { #[codec(index = 3)] ApprovalChecking, /// An approval vote from the new version. - /// We can't create this version untill all nodes + /// We can't create this version until all nodes /// have been updated to support it and max_approval_coalesce_count /// is set to more than 1. #[codec(index = 4)] @@ -1600,7 +1633,7 @@ impl ValidityAttestation { pub fn to_compact_statement(&self, candidate_hash: CandidateHash) -> CompactStatement { // Explicit and implicit map directly from // `ValidityVote::Valid` and `ValidityVote::Issued`, and hence there is a - // `1:1` relationshow which enables the conversion. + // `1:1` relationship which enables the conversion. match *self { ValidityAttestation::Implicit(_) => CompactStatement::Seconded(candidate_hash), ValidityAttestation::Explicit(_) => CompactStatement::Valid(candidate_hash), @@ -1943,6 +1976,29 @@ pub enum PvfExecKind { Approval, } +/// Bit indices in the `HostConfiguration.node_features` that correspond to different node features. +pub type NodeFeatures = BitVec<u8, bitvec::order::Lsb0>; + +/// Module containing feature-specific bit indices into the `NodeFeatures` bitvec. +pub mod node_features { + /// A feature index used to identify a bit into the node_features array stored + /// in the HostConfiguration. + #[repr(u8)] + pub enum FeatureIndex { + /// Tells if tranche0 assignments could be sent in a single certificate. + /// Reserved for: `` + EnableAssignmentsV2 = 0, + /// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bits. + /// The value stored there represents the assumed core index where the candidates + /// are backed. This is needed for the elastic scaling MVP. + ElasticScalingMVP = 1, + /// First unassigned feature bit. + /// Every time a new feature flag is assigned it should take this value, + /// and this value should be incremented.
+ FirstUnassigned = 2, + } +} + #[cfg(test)] mod tests { use super::*; @@ -1956,11 +2012,11 @@ mod tests { descriptor: CandidateDescriptor { para_id: 0.into(), relay_parent: zeros, - collator: CollatorId::from(sr25519::Public::from_raw([0; 32])), + collator: CollatorId::from(sr25519::Public::default()), persisted_validation_data_hash: zeros, pov_hash: zeros, erasure_root: zeros, - signature: CollatorSignature::from(sr25519::Signature([0u8; 64])), + signature: CollatorSignature::from(sr25519::Signature::default()), para_head: zeros, validation_code_hash: ValidationCode(vec![1, 2, 3, 4, 5, 6, 7, 8, 9]).hash(), }, diff --git a/polkadot/primitives/src/v6/signed.rs b/polkadot/primitives/src/v7/signed.rs similarity index 100% rename from polkadot/primitives/src/v6/signed.rs rename to polkadot/primitives/src/v7/signed.rs diff --git a/polkadot/primitives/src/v6/slashing.rs b/polkadot/primitives/src/v7/slashing.rs similarity index 100% rename from polkadot/primitives/src/v6/slashing.rs rename to polkadot/primitives/src/v7/slashing.rs diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 85d89ba76f75..ced917853bed 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -17,39 +17,13 @@ //! Staging Primitives. // Put any primitives used by staging APIs functions here -pub use crate::v6::*; +use crate::v7::*; use parity_scale_codec::{Decode, Encode}; use primitives::RuntimeDebug; use scale_info::TypeInfo; use sp_arithmetic::Perbill; -/// Approval voting configuration parameters -#[derive( - RuntimeDebug, - Copy, - Clone, - PartialEq, - Encode, - Decode, - TypeInfo, - serde::Serialize, - serde::Deserialize, -)] -pub struct ApprovalVotingParams { - /// The maximum number of candidates `approval-voting` can vote for with - /// a single signatures. - /// - /// Setting it to 1, means we send the approval as soon as we have it available. - pub max_approval_coalesce_count: u32, -} - -impl Default for ApprovalVotingParams { - fn default() -> Self { - Self { max_approval_coalesce_count: 1 } - } -} - /// Scheduler configuration parameters. All coretime/ondemand parameters are here. #[derive( RuntimeDebug, @@ -101,7 +75,7 @@ pub struct SchedulerParams { pub on_demand_fee_variability: Perbill, /// The minimum amount needed to claim a slot in the spot pricing queue. pub on_demand_base_fee: Balance, - /// The number of blocks a claim stays in the scheduler's claimqueue before getting cleared. + /// The number of blocks a claim stays in the scheduler's claim queue before getting cleared. /// This number should go reasonably higher than the number of blocks in the async backing /// lookahead. pub ttl: BlockNumber, @@ -124,28 +98,3 @@ impl> Default for SchedulerParams } } } - -use bitvec::vec::BitVec; - -/// Bit indices in the `HostConfiguration.node_features` that correspond to different node features. -pub type NodeFeatures = BitVec; - -/// Module containing feature-specific bit indices into the `NodeFeatures` bitvec. -pub mod node_features { - /// A feature index used to indentify a bit into the node_features array stored - /// in the HostConfiguration. - #[repr(u8)] - pub enum FeatureIndex { - /// Tells if tranch0 assignments could be sent in a single certificate. - /// Reserved for: `` - EnableAssignmentsV2 = 0, - /// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bits. - /// The value stored there represents the assumed core index where the candidates - /// are backed. 
This is needed for the elastic scaling MVP. - ElasticScalingMVP = 1, - /// First unassigned feature bit. - /// Every time a new feature flag is assigned it should take this value. - /// and this should be incremented. - FirstUnassigned = 2, - } -} diff --git a/polkadot/primitives/test-helpers/src/lib.rs b/polkadot/primitives/test-helpers/src/lib.rs index c0118f5960a4..d43cf3317e57 100644 --- a/polkadot/primitives/test-helpers/src/lib.rs +++ b/polkadot/primitives/test-helpers/src/lib.rs @@ -136,17 +136,17 @@ pub fn dummy_head_data() -> HeadData { /// Create a meaningless collator id. pub fn dummy_collator() -> CollatorId { - CollatorId::from(sr25519::Public::from_raw([0; 32])) + CollatorId::from(sr25519::Public::default()) } /// Create a meaningless validator id. pub fn dummy_validator() -> ValidatorId { - ValidatorId::from(sr25519::Public::from_raw([0; 32])) + ValidatorId::from(sr25519::Public::default()) } /// Create a meaningless collator signature. pub fn dummy_collator_signature() -> CollatorSignature { - CollatorSignature::from(sr25519::Signature([0u8; 64])) + CollatorSignature::from(sr25519::Signature::default()) } /// Create a meaningless persisted validation data. @@ -249,7 +249,7 @@ pub fn resign_candidate_descriptor_with_collator>( descriptor.signature = signature; } -/// Extracts validators's public keus (`ValidatorId`) from `Sr25519Keyring` +/// Extracts validators' public keys (`ValidatorId`) from `Sr25519Keyring` pub fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec { val_ids.iter().map(|v| v.public().into()).collect() } diff --git a/polkadot/roadmap/implementers-guide/src/disputes-flow.md b/polkadot/roadmap/implementers-guide/src/disputes-flow.md index b5cc5611c6ff..540b3c45bad4 100644 --- a/polkadot/roadmap/implementers-guide/src/disputes-flow.md +++ b/polkadot/roadmap/implementers-guide/src/disputes-flow.md @@ -74,7 +74,7 @@ The set of validators eligible to vote consists of the validators that had duty votes by the backing validators. If a validator receives an initial dispute message (a set of votes where there are at least two opposing votes -contained), and the PoV or Code are hence not reconstructable from local storage, that validator must request the +contained), and the PoV or Code are hence not reconstructible from local storage, that validator must request the required data from its peers. The dispute availability message must contain code, persisted validation data, and the proof of validity. diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-distribution.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-distribution.md index ce71de6f76b8..c987b7fe5bea 100644 --- a/polkadot/roadmap/implementers-guide/src/node/approval/approval-distribution.md +++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-distribution.md @@ -101,7 +101,7 @@ struct State { } enum MessageFingerprint { - Assigment(Hash, u32, ValidatorIndex), + Assignment(Hash, u32, ValidatorIndex), Approval(Hash, u32, ValidatorIndex), } @@ -203,7 +203,7 @@ For all peers: * Compute `view_intersection` as the intersection of the peer's view blocks with the hashes of the new blocks. * Invoke `unify_with_peer(peer, view_intersection)`. -#### `ApprovalDistributionMessage::DistributeAsignment` +#### `ApprovalDistributionMessage::DistributeAssignment` Call `import_and_circulate_assignment` with `MessageSource::Local`. 
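As a reading aid for the `NodeFeatures` bitvec and `node_features::FeatureIndex` additions in the primitives hunk above, here is a minimal sketch of how a feature bit can be tested. The `BitVec<u8, Lsb0>` type parameters and the `is_enabled` helper are assumptions for the sketch, not part of this diff:

```rust
use bitvec::{order::Lsb0, vec::BitVec};

// Assumed concrete shape of the `NodeFeatures` alias (the diff above elides
// the generic parameters).
type NodeFeatures = BitVec<u8, Lsb0>;

// Hypothetical helper: a bit index past the end of the bitvec simply means
// the feature has not been enabled yet.
fn is_enabled(features: &NodeFeatures, index: u8) -> bool {
    features.get(index as usize).map(|bit| *bit).unwrap_or(false)
}

fn main() {
    let mut features = NodeFeatures::new();
    features.resize(2, false);
    features.set(1, true); // e.g. FeatureIndex::ElasticScalingMVP
    assert!(!is_enabled(&features, 0));
    assert!(is_enabled(&features, 1));
    assert!(!is_enabled(&features, 7)); // out of range => disabled
}
```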
diff --git a/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md b/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md index e3bb14db3a55..c57c4589244e 100644 --- a/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md +++ b/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md @@ -115,7 +115,7 @@ On `Conclude`, shut down the subsystem. Launch the source as a background task running `run(recovery_task)`. -#### `run(recovery_task) -> Result` +#### `run(recovery_task) -> Result` ```rust // How many parallel requests to have going at once. diff --git a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md index e0738e219d1b..90b29249f3e5 100644 --- a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md +++ b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md @@ -76,7 +76,7 @@ dispute is raised reconsider their vote and send an explicit invalid vote. If th recorded, then they could avoid a slash. This is not a problem for our basic security assumptions: The backers are the ones to be supposed to have skin in the -game, so we are not too woried about colluding approval voters getting away slash free as the gambler's ruin property is +game, so we are not too worried about colluding approval voters getting away slash free as the gambler's ruin property is maintained anyway. There is however a separate problem, from colluding approval-voters, that is "lazy" approval voters. If it were easy and reliable for approval-voters to reconsider their vote, in case of an actual dispute, then they don't have a direct incentive (apart from playing a part in securing the network) to properly run the validation function at @@ -331,13 +331,13 @@ off-chain. The reason for this is a dispute can be raised for a candidate in a p validator that is going to be slashed for it might not even be in the current active set. That means it can't be disabled on-chain. We need a way to prevent someone from disputing all valid candidates in the previous era. We do this by keeping track of the validators who lost a dispute in the past few sessions and use that list in addition to the -on-chain disabled validators state. In addition to past session misbehavior, this also heps in case a slash is delayed. +on-chain disabled validators state. In addition to past session misbehavior, this also helps in case a slash is delayed. When we receive a dispute statements set, we do the following: 1. Take the on-chain state of disabled validators at the relay parent block. 1. Take a list of those who lost a dispute in that session in the order that prioritizes the biggest and newest offence. 1. Combine the two lists and take the first byzantine threshold validators from it. -1. If the dispute is unconfimed, check if all votes against the candidate are from disabled validators. +1. If the dispute is unconfirmed, check if all votes against the candidate are from disabled validators. If so, we don't participate in the dispute, but record the votes. ### Backing Votes @@ -591,7 +591,7 @@ Initiates processing via the `Participation` module and updates the internal sta ### On `MuxedMessage::Participation` -This message is sent from `Participatuion` module and indicates a processed dispute participation. 
It's the result of +This message is sent from `Participation` module and indicates a processed dispute participation. It's the result of the processing job initiated with `OverseerSignal::ActiveLeaves`. The subsystem issues a `DisputeMessage` with the result. diff --git a/polkadot/roadmap/implementers-guide/src/node/overseer.md b/polkadot/roadmap/implementers-guide/src/node/overseer.md index 53a115308101..960539b84998 100644 --- a/polkadot/roadmap/implementers-guide/src/node/overseer.md +++ b/polkadot/roadmap/implementers-guide/src/node/overseer.md @@ -108,11 +108,11 @@ way that the receiving subsystem can further address the communication to one of This communication prevents a certain class of race conditions. When the Overseer determines that it is time for subsystems to begin working on top of a particular relay-parent, it will dispatch a `ActiveLeavesUpdate` message to all subsystems to do so, and those messages will be handled asynchronously by those subsystems. Some subsystems will receive -those messsages before others, and it is important that a message sent by subsystem A after receiving +those messages before others, and it is important that a message sent by subsystem A after receiving `ActiveLeavesUpdate` message will arrive at subsystem B after its `ActiveLeavesUpdate` message. If subsystem A maintained an independent channel with subsystem B to communicate, it would be possible for subsystem B to handle the side message before the `ActiveLeavesUpdate` message, but it wouldn't have any logical course of action to take with the -side message - leading to it being discarded or improperly handled. Well-architectured state machines should have a +side message - leading to it being discarded or improperly handled. Well-architected state machines should have a single source of inputs, so that is what we do here. One exception is reasonable to make for responses to requests. A request should be made via the overseer in order to diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md index 7f6fef7ddf63..f8e88a67fcb8 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md @@ -8,7 +8,7 @@ pre-checking. Head over to [overview] for the PVF pre-checking process overview. There is no dedicated input mechanism for PVF pre-checker. Instead, PVF pre-checker looks on the `ActiveLeavesUpdate` event stream for work. -This subsytem does not produce any output messages either. The subsystem will, however, send messages to the +This subsystem does not produce any output messages either. The subsystem will, however, send messages to the [Runtime API] subsystem to query for the pending PVFs and to submit votes. In addition to that, it will also communicate with [Candidate Validation] Subsystem to request PVF pre-check. diff --git a/polkadot/roadmap/implementers-guide/src/runtime/README.md b/polkadot/roadmap/implementers-guide/src/runtime/README.md index 459f0e6b69d9..10eedb49ec38 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/README.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/README.md @@ -89,7 +89,7 @@ struct SessionChangeNotification { prev_config: HostConfiguration, // The configuration after handling the session change. new_config: HostConfiguration, - // A secure randomn seed for the session, gathered from BABE. 
+ // A secure random seed for the session, gathered from BABE. random_seed: [u8; 32], // The session index of the beginning session. session_index: SessionIndex, diff --git a/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md b/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md index 69d33ca8670d..ed765634b592 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md @@ -62,7 +62,7 @@ HRMP related storage layout HrmpOpenChannelRequests: map HrmpChannelId => Option; HrmpOpenChannelRequestsList: Vec; -/// This mapping tracks how many open channel requests are inititated by a given sender para. +/// This mapping tracks how many open channel requests are initiated by a given sender para. /// Invariant: `HrmpOpenChannelRequests` should contain the same number of items that has `(X, _)` /// as the number of `HrmpOpenChannelRequestCount` for `X`. HrmpOpenChannelRequestCount: map ParaId => u32; @@ -233,7 +233,7 @@ executed the message. 1. Send a downward message to the opposite party notifying about the channel closing. * The DM is sent using `queue_downward_message`. * The DM is represented by the `HrmpChannelClosing` XCM message with: - * `initator` is set to `origin`, + * `initiator` is set to `origin`, * `sender` is set to `ch.sender`, * `recipient` is set to `ch.recipient`. * The opposite party is `ch.sender` if `origin` is `ch.recipient` and `ch.recipient` if `origin` is `ch.sender`. diff --git a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md index f6a32a01d502..fd74f33253b7 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md @@ -147,15 +147,16 @@ All failed checks should lead to an unrecoverable error making the block invalid // return a vector of cleaned-up core IDs. } ``` -* `force_enact(ParaId)`: Forcibly enact the candidate with the given ID as though it had been deemed available by - bitfields. Is a no-op if there is no candidate pending availability for this para-id. This should generally not be - used but it is useful during execution of Runtime APIs, where the changes to the state are expected to be discarded - directly after. +* `force_enact(ParaId)`: Forcibly enact the pending candidates of the given paraid as though they had been deemed + available by bitfields. Is a no-op if there is no candidate pending availability for this para-id. + If there are multiple candidates pending availability for this para-id, it will enact all of + them. This should generally not be used but it is useful during execution of Runtime APIs, + where the changes to the state are expected to be discarded directly after. * `candidate_pending_availability(ParaId) -> Option`: returns the `CommittedCandidateReceipt` pending availability for the para provided, if any. * `pending_availability(ParaId) -> Option`: returns the metadata around the candidate pending availability for the para, if any. -* `collect_disputed(disputed: Vec) -> Vec`: Sweeps through all paras pending availability. If +* `free_disputed(disputed: Vec) -> Vec`: Sweeps through all paras pending availability. If the candidate hash is one of the disputed candidates, then clean up the corresponding storage for that candidate and the commitments. Return a vector of cleaned-up core IDs. 
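The reworked `force_enact`/`free_disputed` semantics described in the inclusion.md hunk above can be pictured with a small standalone sketch: sweep the candidates pending availability, clean up those whose hash is disputed, and return the freed cores. The types here (`PendingCandidate`, plain `u32` core IDs) are simplified stand-ins, not the pallet's actual storage API:

```rust
use std::collections::HashSet;

// Simplified stand-in for a candidate pending availability.
struct PendingCandidate {
    hash: [u8; 32],
    core: u32,
}

// Sketch of the `free_disputed` shape: drop every pending candidate whose
// hash is in the disputed set and report which cores were cleaned up.
fn free_disputed(
    pending: &mut Vec<PendingCandidate>,
    disputed: &HashSet<[u8; 32]>,
) -> Vec<u32> {
    let mut freed_cores = Vec::new();
    pending.retain(|candidate| {
        if disputed.contains(&candidate.hash) {
            // In the real pallet this also clears the candidate's storage
            // and commitments; here we only record the freed core.
            freed_cores.push(candidate.core);
            false
        } else {
            true
        }
    });
    freed_cores
}
```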
diff --git a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md index 5419ddae83d4..7972c706b9ee 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md @@ -17,7 +17,7 @@ There are a couple of important notes to the operations in this inherent as they this fork. 1. When disputes are initiated, we remove the block from pending availability. This allows us to roll back chains to the block before blocks are included as opposed to backing. It's important to do this before processing bitfields. -1. `Inclusion::collect_disputed` is kind of expensive so it's important to gate this on whether there are actually any +1. `Inclusion::free_disputed` is kind of expensive so it's important to gate this on whether there are actually any new disputes. Which should be never. 1. And we don't accept parablocks that have open disputes or disputes that have concluded against the candidate. It's important to import dispute statements before backing, but this is already the case as disputes are imported before @@ -88,7 +88,7 @@ to `sanitize_bitfields` function for implementation details. Backed candidates sanitization removes malformed ones, candidates which have got concluded invalid disputes against them or candidates produced by unassigned cores. Furthermore any backing votes from disabled validators for a candidate are dropped. This is part of the validator disabling strategy. After filtering the statements from disabled validators a -backed candidate may end up with votes count less than `minimum_backing_votes` (a parameter from `HostConfiguiration`). +backed candidate may end up with votes count less than `minimum_backing_votes` (a parameter from `HostConfiguration`). In this case the whole candidate is dropped otherwise it will be rejected by `process_candidates` from pallet inclusion. All checks related to backed candidates are implemented in `sanitize_backed_candidates` and `filter_backed_statements_from_disabled_validators`. diff --git a/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md b/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md index 32a7fe652dbc..04b221a83e58 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md @@ -285,7 +285,6 @@ No finalization routine runs for this module. - This clears them from `Scheduled` and marks each corresponding `core` in the `AvailabilityCores` as occupied. - Since both the availability cores and the newly-occupied cores lists are sorted ascending, this method can be implemented efficiently. -- `core_para(CoreIndex) -> ParaId`: return the currently-scheduled or occupied ParaId for the given core. - `group_validators(GroupIndex) -> Option>`: return all validators in a given group, if the group index is valid for this session. - `availability_timeout_predicate() -> Option bool>`: returns an optional predicate diff --git a/polkadot/roadmap/implementers-guide/src/types/approval.md b/polkadot/roadmap/implementers-guide/src/types/approval.md index c19ffa53762a..29d973ca0ab8 100644 --- a/polkadot/roadmap/implementers-guide/src/types/approval.md +++ b/polkadot/roadmap/implementers-guide/src/types/approval.md @@ -39,7 +39,7 @@ enum AssignmentCertKindV2 { /// The core index chosen in this cert. core_index: CoreIndex, }, - /// Deprectated assignment. Soon to be removed. + /// Deprecated assignment. 
Soon to be removed. /// /// An assignment story based on the VRF that authorized the relay-chain block where the /// candidate was included combined with a sample number. @@ -117,7 +117,7 @@ struct IndirectSignedApprovalVote { ## `CheckedAssignmentCert` An assignment cert which has checked both the VRF and the validity of the implied assignment according to the selection -criteria rules of the protocol. This type should be declared in such a way as to be instantiatable only when the checks +criteria rules of the protocol. This type should be declared in such a way as to be instantiable only when the checks have actually been done. Fields should be accessible via getters, not direct struct access. ```rust diff --git a/polkadot/roadmap/implementers-guide/src/types/disputes.md b/polkadot/roadmap/implementers-guide/src/types/disputes.md index c49e0fea2625..ac09084c48a2 100644 --- a/polkadot/roadmap/implementers-guide/src/types/disputes.md +++ b/polkadot/roadmap/implementers-guide/src/types/disputes.md @@ -82,7 +82,7 @@ struct DisputeState { struct ScrapedOnChainVotes { /// The session index at which the block was included. session: SessionIndex, - /// The backing and seconding validity attestations for all candidates, provigind the full candidate receipt. + /// The backing and seconding validity attestations for all candidates, providing the full candidate receipt. backing_validators_per_candidate: Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)> /// Set of concluded disputes that were recorded /// on chain within the inherent. diff --git a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md index 54cdc2edd12d..e011afb97089 100644 --- a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -22,7 +22,7 @@ All subsystems have their own message types; all of them need to be able to list are currently two proposals for how to handle that with unified communication channels: 1. Retaining the `OverseerSignal` definition above, add `enum FromOrchestra {Signal(OverseerSignal), Message(T)}`. -1. Add a generic variant to `OverseerSignal`: `Message(T)`. +1. Add a generic variant to `OverseerSignal`: `Message(T)`. Either way, there will be some top-level type encapsulating messages from the overseer to each subsystem. @@ -340,9 +340,15 @@ enum BitfieldSigningMessage { } ```rust enum CandidateBackingMessage { /// Requests a set of backable candidates attested by the subsystem. - /// - /// Each pair is (candidate_hash, candidate_relay_parent). - GetBackedCandidates(Vec<(CandidateHash, Hash)>, oneshot::Sender<Vec<BackedCandidate>>), + /// The order of candidates of the same para must be preserved in the response. + /// If a backed candidate of a para cannot be retrieved, the response should not contain any + /// candidates of the same para that follow it in the input vector. In other words, assuming + /// candidates are supplied in dependency order, we must ensure that this dependency order is + /// preserved. + GetBackedCandidates( + HashMap<ParaId, Vec<(CandidateHash, Hash)>>, + oneshot::Sender<HashMap<ParaId, Vec<BackedCandidate>>>, + ), /// Note that the Candidate Backing subsystem should second the given candidate in the context of the /// given relay-parent (ref. by hash). This candidate must be validated using the provided PoV. /// The PoV is expected to match the `pov_hash` in the descriptor. 
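The ordering contract on the new `GetBackedCandidates` shape above (per-para request order preserved; the response stops at the first candidate that cannot be retrieved, so dependency order survives) can be illustrated with a sketch. The names and the `u32` para-id stand-in are illustrative only:

```rust
use std::collections::HashMap;

type CandidateHash = [u8; 32];

// Illustrative stand-in for a backed candidate.
#[derive(Clone)]
struct BackedCandidate {
    hash: CandidateHash,
}

// For each para, keep candidates in request order and truncate at the first
// one the backing subsystem cannot retrieve, preserving dependency order.
fn select_backed(
    requested: HashMap<u32, Vec<CandidateHash>>,
    lookup: impl Fn(&CandidateHash) -> Option<BackedCandidate>,
) -> HashMap<u32, Vec<BackedCandidate>> {
    requested
        .into_iter()
        .map(|(para, hashes)| {
            let backed: Vec<BackedCandidate> =
                hashes.iter().map_while(|hash| lookup(hash)).collect();
            (para, backed)
        })
        .collect()
}
```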
diff --git a/polkadot/roadmap/phase-1.toml b/polkadot/roadmap/phase-1.toml index 3a5f0d752deb..9b9374d234bd 100644 --- a/polkadot/roadmap/phase-1.toml +++ b/polkadot/roadmap/phase-1.toml @@ -34,7 +34,7 @@ requires = ["two-phase-inclusion"] items = [ { label = "Submit secondary checks to runtime", port = "submitsecondary", requires = ["secondary-checking"] }, { label = "Track all candidates within the slash period as well as their session" }, - { label = "Track reports and attestatations for candidates" }, + { label = "Track reports and attestations for candidates" }, ] [[group]] diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index a4696248f70b..0850009a8767 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -15,7 +15,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc"], workspace = true } serde_derive = { workspace = true } static_assertions = "1.1.0" diff --git a/polkadot/runtime/common/src/assigned_slots/migration.rs b/polkadot/runtime/common/src/assigned_slots/migration.rs index 379302839128..07c57b168b54 100644 --- a/polkadot/runtime/common/src/assigned_slots/migration.rs +++ b/polkadot/runtime/common/src/assigned_slots/migration.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use super::{Config, MaxPermanentSlots, MaxTemporarySlots, Pallet, LOG_TARGET}; -use frame_support::traits::{Get, GetStorageVersion, OnRuntimeUpgrade}; +use frame_support::traits::{Get, GetStorageVersion, UncheckedOnRuntimeUpgrade}; #[cfg(feature = "try-runtime")] use alloc::vec::Vec; @@ -23,10 +23,9 @@ use alloc::vec::Vec; use frame_support::ensure; pub mod v1 { - use super::*; pub struct VersionUncheckedMigrateToV1(core::marker::PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { let on_chain_version = Pallet::::on_chain_storage_version(); diff --git a/polkadot/runtime/common/src/paras_registrar/migration.rs b/polkadot/runtime/common/src/paras_registrar/migration.rs index 2f284ad08a6d..6b110d2ff5d5 100644 --- a/polkadot/runtime/common/src/paras_registrar/migration.rs +++ b/polkadot/runtime/common/src/paras_registrar/migration.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . 
use super::*; -use frame_support::traits::{Contains, OnRuntimeUpgrade}; +use frame_support::traits::{Contains, UncheckedOnRuntimeUpgrade}; #[derive(Encode, Decode)] pub struct ParaInfoV1 { @@ -27,7 +27,7 @@ pub struct ParaInfoV1 { pub struct VersionUncheckedMigrateToV1( core::marker::PhantomData<(T, UnlockParaIds)>, ); -impl> OnRuntimeUpgrade +impl> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index 6b5d87e7cfaa..b522433017a0 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -35,7 +35,7 @@ use parity_scale_codec::{Decode, Encode}; use primitives::{HeadData, Id as ParaId, ValidationCode, LOWEST_PUBLIC_ID, MIN_CODE_SIZE}; use runtime_parachains::{ configuration, ensure_parachain, - paras::{self, OnNewHead, ParaGenesisArgs, ParaKind, SetGoAhead}, + paras::{self, ParaGenesisArgs, UpgradeStrategy}, Origin, ParaLifecycle, }; use scale_info::TypeInfo; @@ -408,6 +408,13 @@ pub mod pallet { /// Schedule a parachain upgrade. /// + /// This will kick off a check of `new_code` by all validators. After the majority of the + /// validators have reported on the validity of the code, the code will either be enacted + /// or the upgrade will be rejected. If the code is enacted, the current code of the + /// parachain will be overwritten directly. This means that any PoV will be checked by this + /// new code. The parachain itself will not be informed explicitly that the validation code + /// has changed. + /// /// Can be called by Root, the parachain, or the parachain manager if the parachain is /// unlocked. #[pallet::call_index(7)] @@ -418,7 +425,11 @@ pub mod pallet { new_code: ValidationCode, ) -> DispatchResult { Self::ensure_root_para_or_owner(origin, para)?; - runtime_parachains::schedule_code_upgrade::(para, new_code, SetGoAhead::No)?; + runtime_parachains::schedule_code_upgrade::( + para, + new_code, + UpgradeStrategy::ApplyAtExpectedBlock, + )?; Ok(()) } diff --git a/polkadot/runtime/common/src/purchase.rs b/polkadot/runtime/common/src/purchase.rs index 4a49fdbd82b4..e46b4e3b4d4c 100644 --- a/polkadot/runtime/common/src/purchase.rs +++ b/polkadot/runtime/common/src/purchase.rs @@ -424,8 +424,8 @@ pub mod pallet { impl Pallet { fn verify_signature(who: &T::AccountId, signature: &[u8]) -> Result<(), DispatchError> { // sr25519 always expects a 64 byte signature. - let signature: AnySignature = sr25519::Signature::from_slice(signature) - .ok_or(Error::::InvalidSignature)? + let signature: AnySignature = sr25519::Signature::try_from(signature) + .map_err(|_| Error::::InvalidSignature)? .into(); // In Polkadot, the AccountId is always the same as the 32 byte public key. diff --git a/polkadot/runtime/common/src/xcm_sender.rs b/polkadot/runtime/common/src/xcm_sender.rs index fe56750d9c04..5254f908f61f 100644 --- a/polkadot/runtime/common/src/xcm_sender.rs +++ b/polkadot/runtime/common/src/xcm_sender.rs @@ -143,7 +143,7 @@ where /// Implementation of `xcm_builder::EnsureDelivery` which helps to ensure delivery to the /// `ParaId` parachain (sibling or child). Deposits existential deposit for origin (if needed). /// Deposits estimated fee to the origin account (if needed). -/// Allows to trigger additional logic for specific `ParaId` (e.g. open HRMP channel) (if neeeded). +/// Allows triggering additional logic for a specific `ParaId` (e.g. 
open HRMP channel) (if needed). #[cfg(feature = "runtime-benchmarks")] pub struct ToParachainDeliveryHelper< XcmConfig, diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 79e9ec4003c2..e0fe58654f6c 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -15,7 +15,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], workspace = true } derive_more = "0.99.17" bitflags = "1.3.2" @@ -58,7 +58,7 @@ polkadot-runtime-metrics = { path = "../metrics", default-features = false } polkadot-core-primitives = { path = "../../core-primitives", default-features = false } [dev-dependencies] -futures = "0.3.21" +futures = "0.3.30" hex-literal = "0.4.1" keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" } frame-support-test = { path = "../../../substrate/frame/support/test" } diff --git a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs index 67fbd346176c..b3d47ea5b820 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs @@ -622,7 +622,7 @@ fn assignment_proportions_in_core_state_work() { ); } - // Case 2: Current assignment remaning < step after pop + // Case 2: Current assignment remaining < step after pop { assert_eq!( CoretimeAssigner::pop_assignment_for_core(core_idx), diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs index 8360e7a78d0a..779d6f04e396 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs @@ -70,11 +70,7 @@ mod benchmarks { let para_id = ParaId::from(111u32); init_parathread::(para_id); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let order = EnqueuedOrder::new(para_id); - - for _ in 0..s { - Pallet::::add_on_demand_order(order.clone(), QueuePushDirection::Back).unwrap(); - } + Pallet::::populate_queue(para_id, s); #[extrinsic_call] _(RawOrigin::Signed(caller.into()), BalanceOf::::max_value(), para_id) @@ -87,11 +83,8 @@ mod benchmarks { let para_id = ParaId::from(111u32); init_parathread::(para_id); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let order = EnqueuedOrder::new(para_id); - for _ in 0..s { - Pallet::::add_on_demand_order(order.clone(), QueuePushDirection::Back).unwrap(); - } + Pallet::::populate_queue(para_id, s); #[extrinsic_call] _(RawOrigin::Signed(caller.into()), BalanceOf::::max_value(), para_id) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs new file mode 100644 index 000000000000..8589ddc292bd --- /dev/null +++ b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs @@ -0,0 +1,181 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. +use super::*; +use frame_support::{ + migrations::VersionedMigration, pallet_prelude::ValueQuery, storage_alias, + traits::UncheckedOnRuntimeUpgrade, weights::Weight, +}; + +mod v0 { + use super::*; + use sp_std::collections::vec_deque::VecDeque; + + #[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone)] + pub(super) struct EnqueuedOrder { + pub para_id: ParaId, + } + + /// Keeps track of the multiplier used to calculate the current spot price for the on demand + /// assigner. + /// NOTE: Ignoring the `OnEmpty` field for the migration. + #[storage_alias] + pub(super) type SpotTraffic = StorageValue, FixedU128, ValueQuery>; + + /// The order storage entry. Uses a VecDeque to be able to push to the front of the + /// queue from the scheduler on session boundaries. + /// NOTE: Ignoring the `OnEmpty` field for the migration. + #[storage_alias] + pub(super) type OnDemandQueue = + StorageValue, VecDeque, ValueQuery>; +} + +mod v1 { + use super::*; + + use crate::assigner_on_demand::LOG_TARGET; + + /// Migration to V1 + pub struct UncheckedMigrateToV1(sp_std::marker::PhantomData); + impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV1 { + fn on_runtime_upgrade() -> Weight { + let mut weight: Weight = Weight::zero(); + + // Migrate the current traffic value + let config = >::config(); + QueueStatus::::mutate(|mut queue_status| { + Pallet::::update_spot_traffic(&config, &mut queue_status); + + let v0_queue = v0::OnDemandQueue::::take(); + // Process the v0 queue into v1. + v0_queue.into_iter().for_each(|enqueued_order| { + // Re-adding the old orders will use the new system. + Pallet::::add_on_demand_order( + queue_status, + enqueued_order.para_id, + QueuePushDirection::Back, + ); + }); + }); + + // Remove the old storage. 
+ v0::OnDemandQueue::::kill(); // 1 write + v0::SpotTraffic::::kill(); // 1 write + + // Config read + weight.saturating_accrue(T::DbWeight::get().reads(1)); + // QueueStatus read write (update_spot_traffic) + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + // Kill x 2 + weight.saturating_accrue(T::DbWeight::get().writes(2)); + + log::info!(target: LOG_TARGET, "Migrated on demand assigner storage to v1"); + weight + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + let n: u32 = v0::OnDemandQueue::::get().len() as u32; + + log::info!( + target: LOG_TARGET, + "Number of orders waiting in the queue before: {n}", + ); + + Ok(n.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::info!(target: LOG_TARGET, "Running post_upgrade()"); + + ensure!( + v0::OnDemandQueue::::get().is_empty(), + "OnDemandQueue should be empty after the migration" + ); + + let expected_len = u32::decode(&mut &state[..]).unwrap(); + let queue_status_size = QueueStatus::::get().size(); + ensure!( + expected_len == queue_status_size, + "Number of orders should be the same before and after migration" + ); + + let n_affinity_entries: u32 = + AffinityEntries::::iter().map(|(_index, heap)| heap.len() as u32).sum(); + let n_para_id_affinity: u32 = ParaIdAffinity::::iter() + .map(|(_para_id, affinity)| affinity.count as u32) + .sum(); + ensure!( + n_para_id_affinity == n_affinity_entries, + "Number of affinity entries should be the same as the counts in ParaIdAffinity" + ); + + Ok(()) + } + } +} + +/// Migrate `V0` to `V1` of the storage format. +pub type MigrateV0ToV1 = VersionedMigration< + 0, + 1, + v1::UncheckedMigrateToV1, + Pallet, + ::DbWeight, +>; + +#[cfg(test)] +mod tests { + use super::{v0, v1, UncheckedOnRuntimeUpgrade, Weight}; + use crate::mock::{new_test_ext, MockGenesisConfig, OnDemandAssigner, Test}; + use primitives::Id as ParaId; + + #[test] + fn migration_to_v1_preserves_queue_ordering() { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + // Place orders for paraids 1..5 + for i in 1..=5 { + v0::OnDemandQueue::::mutate(|queue| { + queue.push_back(v0::EnqueuedOrder { para_id: ParaId::new(i) }) + }); + } + + // Queue has 5 orders + let old_queue = v0::OnDemandQueue::::get(); + assert_eq!(old_queue.len(), 5); + // New queue has 0 orders + assert_eq!(OnDemandAssigner::get_queue_status().size(), 0); + + // For tests, db weight is zero. + assert_eq!( + as UncheckedOnRuntimeUpgrade>::on_runtime_upgrade(), + Weight::zero() + ); + + // New queue has 5 orders + assert_eq!(OnDemandAssigner::get_queue_status().size(), 5); + + // Compare each entry from the old queue with the entry in the new queue. + old_queue.iter().zip(OnDemandAssigner::get_free_entries().iter()).for_each( + |(old_enq, new_enq)| { + assert_eq!(old_enq.para_id, new_enq.para_id); + }, + ); + }); + } +} diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 5c33fbfa48f9..270bfdf7361c 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -16,22 +16,32 @@ //! The parachain on demand assignment module. //! -//! Implements a mechanism for taking in orders for pay as you go (PAYG) or on demand -//! parachain (previously parathreads) assignments. This module is not handled by the -//! 
initializer but is instead instantiated in the `construct_runtime` macro. +//! Implements a mechanism for taking in orders for on-demand parachain (previously parathreads) +//! assignments. This module is not handled by the initializer but is instead instantiated in the +//! `construct_runtime` macro. //! //! The module currently limits parallel execution of blocks from the same `ParaId` via //! a core affinity mechanism. As long as there exists an affinity for a `CoreIndex` for //! a specific `ParaId`, orders for blockspace for that `ParaId` will only be assigned to -//! that `CoreIndex`. This affinity mechanism can be removed if it can be shown that parallel -//! execution is valid. +//! that `CoreIndex`. +//! +//! NOTE: Once we have elastic scaling implemented we might want to extend this module to support +//! ignoring core affinity up to a certain extent. This should be opt-in though as the parachain +//! needs to support multiple cores in the same block. If we want to enable a single parachain +//! occupying multiple cores in on-demand, we will likely add a separate order type, where the +//! intent can be made explicit. mod benchmarking; +pub mod migration; mod mock_helpers; +extern crate alloc; + #[cfg(test)] mod tests; +use core::mem::take; + use crate::{configuration, paras, scheduler::common::Assignment}; use frame_support::{ @@ -43,13 +53,16 @@ use frame_support::{ }, }; use frame_system::pallet_prelude::*; -use primitives::{CoreIndex, Id as ParaId}; +use primitives::{CoreIndex, Id as ParaId, ON_DEMAND_MAX_QUEUE_MAX_SIZE}; use sp_runtime::{ traits::{One, SaturatedConversion}, FixedPointNumber, FixedPointOperand, FixedU128, Perbill, Saturating, }; -use alloc::collections::vec_deque::VecDeque; +use alloc::collections::BinaryHeap; +use core::{ + cmp::{Ord, Ordering, PartialOrd}, +}; const LOG_TARGET: &str = "runtime::parachains::assigner-on-demand"; @@ -73,17 +86,116 @@ impl WeightInfo for TestWeightInfo { } } +/// Metadata for the full queue. +/// +/// This includes elements with affinity and free entries. +/// +/// The actual queue is implemented via multiple priority queues. One for each core, for entries +/// which currently have a core affinity and one free queue, with entries without any affinity yet. +/// +/// The design aims to have most queue accesses be O(1) or O(log(N)). Absolute worst case is O(N). +/// Importantly this includes all accesses that happen in a single block. Even with 50 cores, the +/// total complexity of all operations in the block should stay within the above complexities. In +/// particular O(N) stays O(N), it should never be O(N*cores). +/// +/// A more concrete rundown on complexity: +/// +/// - insert: O(1) for placing an order, O(log(N)) for push backs. +/// - pop_assignment_for_core: O(log(N)), O(N) worst case: Can only happen for one core, next core +/// is already less work. +/// - report_processed & push back: If affinity dropped to 0, then O(N) in the worst case. Again +/// this divides per core. +/// +/// Reads still exist, also improved slightly, but in the worst case we fetch all entries. +#[derive(Encode, Decode, TypeInfo)] +struct QueueStatusType { + /// Last calculated traffic value. + traffic: FixedU128, + /// The next index to use. + next_index: QueueIndex, + /// Smallest index still in use. + /// + /// In case of a completely empty queue (free + affinity queues), `next_index - smallest_index + /// == 0`. + smallest_index: QueueIndex, + /// Indices that have been freed already. 
+ /// + /// They still leave a hole to `smallest_index`, so we cannot yet bump `smallest_index`. This binary + /// heap is roughly bounded in the number of on demand cores: + /// + /// For a single core, elements will always be processed in order. With each core added, a + /// level of out of order execution is added. + freed_indices: BinaryHeap, +} + +impl Default for QueueStatusType { + fn default() -> QueueStatusType { + QueueStatusType { + traffic: FixedU128::default(), + next_index: QueueIndex(0), + smallest_index: QueueIndex(0), + freed_indices: BinaryHeap::new(), + } + } +} + +impl QueueStatusType { + /// How many orders are queued in total? + /// + /// This includes entries which have core affinity. + fn size(&self) -> u32 { + self.next_index + .0 + .overflowing_sub(self.smallest_index.0) + .0 + .saturating_sub(self.freed_indices.len() as u32) + } + + /// Get the next index to use for an element newly pushed to the back of the queue. + fn push_back(&mut self) -> QueueIndex { + let QueueIndex(next_index) = self.next_index; + self.next_index = QueueIndex(next_index.overflowing_add(1).0); + QueueIndex(next_index) + } + + /// Push something to the front of the queue + fn push_front(&mut self) -> QueueIndex { + self.smallest_index = QueueIndex(self.smallest_index.0.overflowing_sub(1).0); + self.smallest_index + } + + /// The given index is no longer part of the queue. + /// + /// This updates `smallest_index` if need be. + fn consume_index(&mut self, removed_index: QueueIndex) { + if removed_index != self.smallest_index { + self.freed_indices.push(removed_index.reverse()); + return + } + let mut index = self.smallest_index.0.overflowing_add(1).0; + // Even more to advance? + while self.freed_indices.peek() == Some(&ReverseQueueIndex(index)) { + index = index.overflowing_add(1).0; + self.freed_indices.pop(); + } + self.smallest_index = QueueIndex(index); + } +} + /// Keeps track of how many assignments a scheduler currently has at a specific `CoreIndex` for a /// specific `ParaId`. #[derive(Encode, Decode, Default, Clone, Copy, TypeInfo)] #[cfg_attr(test, derive(PartialEq, RuntimeDebug))] -pub struct CoreAffinityCount { - core_idx: CoreIndex, +struct CoreAffinityCount { + core_index: CoreIndex, count: u32, } /// An indicator as to which end of the `OnDemandQueue` an assignment will be placed. -pub enum QueuePushDirection { +#[cfg_attr(test, derive(RuntimeDebug))] +enum QueuePushDirection { Back, Front, } @@ -93,9 +205,8 @@ type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// Errors that can happen during spot traffic calculation. -#[derive(PartialEq)] -#[cfg_attr(feature = "std", derive(Debug))] -pub enum SpotTrafficCalculationErr { +#[derive(PartialEq, RuntimeDebug)] +enum SpotTrafficCalculationErr { /// The order queue capacity is at 0. QueueCapacityIsZero, /// The queue size is larger than the queue capacity. @@ -104,15 +215,85 @@ pub enum SpotTrafficCalculationErr { Division, } +/// Type used for priority indices. +// NOTE: The `Ord` implementation for this type is unsound in the general case. +// Do not use it for anything but its intended purpose. +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] +struct QueueIndex(u32); + +/// QueueIndex with reverse ordering. +/// +/// Same as `Reverse(QueueIndex)`, but with all the needed traits implemented. 
+#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] +struct ReverseQueueIndex(u32); + +impl QueueIndex { + fn reverse(self) -> ReverseQueueIndex { + ReverseQueueIndex(self.0) + } +} + +impl Ord for QueueIndex { + fn cmp(&self, other: &Self) -> Ordering { + let diff = self.0.overflowing_sub(other.0).0; + if diff == 0 { + Ordering::Equal + } else if diff <= ON_DEMAND_MAX_QUEUE_MAX_SIZE { + Ordering::Greater + } else { + Ordering::Less + } + } +} + +impl PartialOrd for QueueIndex { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for ReverseQueueIndex { + fn cmp(&self, other: &Self) -> Ordering { + QueueIndex(other.0).cmp(&QueueIndex(self.0)) + } +} +impl PartialOrd for ReverseQueueIndex { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(&other)) + } +} + /// Internal representation of an order after it has been enqueued already. -#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone)] -pub(super) struct EnqueuedOrder { - pub para_id: ParaId, +/// +/// This data structure is provided for a min BinaryHeap (Ord compares in reverse order with regards +/// to its elements) +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq)] +struct EnqueuedOrder { + para_id: ParaId, + idx: QueueIndex, } impl EnqueuedOrder { - pub fn new(para_id: ParaId) -> Self { - Self { para_id } + fn new(idx: QueueIndex, para_id: ParaId) -> Self { + Self { idx, para_id } + } +} + +impl PartialOrd for EnqueuedOrder { + fn partial_cmp(&self, other: &Self) -> Option { + match other.idx.partial_cmp(&self.idx) { + Some(Ordering::Equal) => other.para_id.partial_cmp(&self.para_id), + o => o, + } + } +} + +impl Ord for EnqueuedOrder { + fn cmp(&self, other: &Self) -> Ordering { + match other.idx.cmp(&self.idx) { + Ordering::Equal => other.para_id.cmp(&self.para_id), + o => o, + } } } @@ -121,8 +302,11 @@ pub mod pallet { use super::*; + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] @@ -141,36 +325,44 @@ pub mod pallet { type TrafficDefaultValue: Get; } - /// Creates an empty spot traffic value if one isn't present in storage already. + /// Creates an empty queue status for an empty queue with initial traffic value. #[pallet::type_value] - pub fn SpotTrafficOnEmpty() -> FixedU128 { - T::TrafficDefaultValue::get() + pub(super) fn QueueStatusOnEmpty() -> QueueStatusType { + QueueStatusType { traffic: T::TrafficDefaultValue::get(), ..Default::default() } } - /// Creates an empty on demand queue if one isn't present in storage already. #[pallet::type_value] - pub(super) fn OnDemandQueueOnEmpty() -> VecDeque { - VecDeque::new() + pub(super) fn EntriesOnEmpty() -> BinaryHeap { + BinaryHeap::new() } - /// Keeps track of the multiplier used to calculate the current spot price for the on demand - /// assigner. - #[pallet::storage] - pub(super) type SpotTraffic = - StorageValue<_, FixedU128, ValueQuery, SpotTrafficOnEmpty>; - - /// The order storage entry. Uses a VecDeque to be able to push to the front of the - /// queue from the scheduler on session boundaries. - #[pallet::storage] - pub(super) type OnDemandQueue = - StorageValue<_, VecDeque, ValueQuery, OnDemandQueueOnEmpty>; - /// Maps a `ParaId` to `CoreIndex` and keeps track of how many assignments the scheduler has in /// it's lookahead. 
Keeping track of this affinity prevents parallel execution of the same /// `ParaId` on two or more `CoreIndex`es. #[pallet::storage] pub(super) type ParaIdAffinity = - StorageMap<_, Twox256, ParaId, CoreAffinityCount, OptionQuery>; + StorageMap<_, Twox64Concat, ParaId, CoreAffinityCount, OptionQuery>; + + /// Overall status of queue (both free + affinity entries) + #[pallet::storage] + pub(super) type QueueStatus = + StorageValue<_, QueueStatusType, ValueQuery, QueueStatusOnEmpty>; + + /// Priority queue for all orders which don't yet (or not any more) have any core affinity. + #[pallet::storage] + pub(super) type FreeEntries = + StorageValue<_, BinaryHeap, ValueQuery, EntriesOnEmpty>; + + /// Queue entries that are currently bound to a particular core due to core affinity. + #[pallet::storage] + pub(super) type AffinityEntries = StorageMap< + _, + Twox64Concat, + CoreIndex, + BinaryHeap, + ValueQuery, + EntriesOnEmpty, + >; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] @@ -183,9 +375,6 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// The `ParaId` supplied to the `place_order` call is not a valid `ParaThread`, making the - /// call is invalid. - InvalidParaId, /// The order queue is full, `place_order` will not continue. QueueFull, /// The current spot price is higher than the max amount specified in the `place_order` @@ -197,45 +386,14 @@ pub mod pallet { impl Hooks> for Pallet { fn on_initialize(_now: BlockNumberFor) -> Weight { let config = >::config(); - // Calculate spot price multiplier and store it. - let old_traffic = SpotTraffic::::get(); - match Self::calculate_spot_traffic( - old_traffic, - config.scheduler_params.on_demand_queue_max_size, - Self::queue_size(), - config.scheduler_params.on_demand_target_queue_utilization, - config.scheduler_params.on_demand_fee_variability, - ) { - Ok(new_traffic) => { - // Only update storage on change - if new_traffic != old_traffic { - SpotTraffic::::set(new_traffic); - Pallet::::deposit_event(Event::::SpotTrafficSet { - traffic: new_traffic, - }); - return T::DbWeight::get().reads_writes(2, 1) - } - }, - Err(SpotTrafficCalculationErr::QueueCapacityIsZero) => { - log::debug!( - target: LOG_TARGET, - "Error calculating spot traffic: The order queue capacity is at 0." - ); - }, - Err(SpotTrafficCalculationErr::QueueSizeLargerThanCapacity) => { - log::debug!( - target: LOG_TARGET, - "Error calculating spot traffic: The queue size is larger than the queue capacity." - ); - }, - Err(SpotTrafficCalculationErr::Division) => { - log::debug!( - target: LOG_TARGET, - "Error calculating spot traffic: Arithmetic error during division, either division by 0 or over/underflow." - ); - }, - }; - T::DbWeight::get().reads_writes(2, 0) + // We need to update the spot traffic on block initialize in order to account for idle + // blocks. + QueueStatus::::mutate(|queue_status| { + Self::update_spot_traffic(&config, queue_status); + }); + + // 2 reads in config and queuestatus, at maximum 1 write to queuestatus. 
+ T::DbWeight::get().reads_writes(2, 1) } } @@ -258,7 +416,7 @@ pub mod pallet { /// Events: /// - `SpotOrderPlaced` #[pallet::call_index(0)] - #[pallet::weight(::WeightInfo::place_order_allow_death(OnDemandQueue::::get().len() as u32))] + #[pallet::weight(::WeightInfo::place_order_allow_death(QueueStatus::::get().size()))] pub fn place_order_allow_death( origin: OriginFor, max_amount: BalanceOf, @@ -285,7 +443,7 @@ pub mod pallet { /// Events: /// - `SpotOrderPlaced` #[pallet::call_index(1)] - #[pallet::weight(::WeightInfo::place_order_keep_alive(OnDemandQueue::::get().len() as u32))] + #[pallet::weight(::WeightInfo::place_order_keep_alive(QueueStatus::::get().size()))] pub fn place_order_keep_alive( origin: OriginFor, max_amount: BalanceOf, @@ -297,10 +455,78 @@ pub mod pallet { } } +// Internal functions and interface to scheduler/wrapping assignment provider. impl Pallet where BalanceOf: FixedPointOperand, { + /// Take the next queued entry that is available for a given core index. + /// + /// Parameters: + /// - `core_index`: The core index + pub fn pop_assignment_for_core(core_index: CoreIndex) -> Option { + let entry: Result = QueueStatus::::try_mutate(|queue_status| { + AffinityEntries::::try_mutate(core_index, |affinity_entries| { + let free_entry = FreeEntries::::try_mutate(|free_entries| { + let affinity_next = affinity_entries.peek(); + let free_next = free_entries.peek(); + let pick_free = match (affinity_next, free_next) { + (None, _) => true, + (Some(_), None) => false, + (Some(a), Some(f)) => f < a, + }; + if pick_free { + let entry = free_entries.pop().ok_or(())?; + let (mut affinities, free): (BinaryHeap<_>, BinaryHeap<_>) = + take(free_entries) + .into_iter() + .partition(|e| e.para_id == entry.para_id); + affinity_entries.append(&mut affinities); + *free_entries = free; + Ok(entry) + } else { + Err(()) + } + }); + let entry = free_entry.or_else(|()| affinity_entries.pop().ok_or(()))?; + queue_status.consume_index(entry.idx); + Ok(entry) + }) + }); + + let assignment = entry.map(|e| Assignment::Pool { para_id: e.para_id, core_index }).ok()?; + + Pallet::::increase_affinity(assignment.para_id(), core_index); + Some(assignment) + } + + /// Report that the `para_id` & `core_index` combination was processed. + /// + /// This should be called once it is clear that the assignment won't get pushed back anymore. + /// + /// In other words for each `pop_assignment_for_core` a call to this function or + /// `push_back_assignment` must follow, but only one. + pub fn report_processed(para_id: ParaId, core_index: CoreIndex) { + Pallet::::decrease_affinity_update_queue(para_id, core_index); + } + + /// Push an assignment back to the front of the queue. + /// + /// The assignment has not been processed yet. Typically used on session boundaries. + /// + /// NOTE: We are not checking queue size here. So due to push backs it is possible that we + /// exceed the maximum queue size slightly. + /// + /// Parameters: + /// - `para_id`: The para that did not make it. + /// - `core_index`: The core the para was scheduled on. + pub fn push_back_assignment(para_id: ParaId, core_index: CoreIndex) { + Pallet::::decrease_affinity_update_queue(para_id, core_index); + QueueStatus::::mutate(|queue_status| { + Pallet::::add_on_demand_order(queue_status, para_id, QueuePushDirection::Front); + }); + } + /// Helper function for `place_order_*` calls. Used to differentiate between placing orders /// with a keep alive check or to allow the account to be reaped. 
/// @@ -326,34 +552,62 @@ where ) -> DispatchResult { let config = >::config(); - // Traffic always falls back to 1.0 - let traffic = SpotTraffic::::get(); - - // Calculate spot price - let spot_price: BalanceOf = traffic.saturating_mul_int( - config.scheduler_params.on_demand_base_fee.saturated_into::>(), - ); - - // Is the current price higher than `max_amount` - ensure!(spot_price.le(&max_amount), Error::::SpotPriceHigherThanMaxAmount); + QueueStatus::::mutate(|queue_status| { + Self::update_spot_traffic(&config, queue_status); + let traffic = queue_status.traffic; - // Charge the sending account the spot price - let _ = T::Currency::withdraw( - &sender, - spot_price, - WithdrawReasons::FEE, - existence_requirement, - )?; + // Calculate spot price + let spot_price: BalanceOf = traffic.saturating_mul_int( + config.scheduler_params.on_demand_base_fee.saturated_into::>(), + ); - let order = EnqueuedOrder::new(para_id); + // Is the current price higher than `max_amount` + ensure!(spot_price.le(&max_amount), Error::::SpotPriceHigherThanMaxAmount); - let res = Pallet::::add_on_demand_order(order, QueuePushDirection::Back); + // Charge the sending account the spot price + let _ = T::Currency::withdraw( + &sender, + spot_price, + WithdrawReasons::FEE, + existence_requirement, + )?; - if res.is_ok() { - Pallet::::deposit_event(Event::::OnDemandOrderPlaced { para_id, spot_price }); - } + ensure!( + queue_status.size() < config.scheduler_params.on_demand_queue_max_size, + Error::::QueueFull + ); + Pallet::::add_on_demand_order(queue_status, para_id, QueuePushDirection::Back); + Ok(()) + }) + } - res + /// Calculate and update spot traffic. + fn update_spot_traffic( + config: &configuration::HostConfiguration>, + queue_status: &mut QueueStatusType, + ) { + let old_traffic = queue_status.traffic; + match Self::calculate_spot_traffic( + old_traffic, + config.scheduler_params.on_demand_queue_max_size, + queue_status.size(), + config.scheduler_params.on_demand_target_queue_utilization, + config.scheduler_params.on_demand_fee_variability, + ) { + Ok(new_traffic) => { + // Only update storage on change + if new_traffic != old_traffic { + queue_status.traffic = new_traffic; + Pallet::::deposit_event(Event::::SpotTrafficSet { traffic: new_traffic }); + } + }, + Err(err) => { + log::debug!( + target: LOG_TARGET, + "Error calculating spot traffic: {:?}", err + ); + }, + }; } /// The spot price multiplier. This is based on the transaction fee calculations defined in: @@ -378,7 +632,7 @@ where /// - `SpotTrafficCalculationErr::QueueCapacityIsZero` /// - `SpotTrafficCalculationErr::QueueSizeLargerThanCapacity` /// - `SpotTrafficCalculationErr::Division` - pub(crate) fn calculate_spot_traffic( + fn calculate_spot_traffic( traffic: FixedU128, queue_capacity: u32, queue_size: u32, @@ -429,176 +683,141 @@ where /// Adds an order to the on demand queue. /// - /// Paramenters: - /// - `order`: The `EnqueuedOrder` to add to the queue. + /// Parameters: /// - `location`: Whether to push this entry to the back or the front of the queue. Pushing an /// entry to the front of the queue is only used when the scheduler wants to push back an /// entry it has already popped. - /// Returns: - /// - The unit type on success. - /// - /// Errors: - /// - `InvalidParaId` - /// - `QueueFull` fn add_on_demand_order( - order: EnqueuedOrder, + queue_status: &mut QueueStatusType, + para_id: ParaId, location: QueuePushDirection, - ) -> Result<(), DispatchError> { - // Only parathreads are valid paraids for on the go parachains. 
- ensure!(>::is_parathread(order.para_id), Error::::InvalidParaId); - - let config = >::config(); - - OnDemandQueue::::try_mutate(|queue| { - // Abort transaction if queue is too large - ensure!( - Self::queue_size() < config.scheduler_params.on_demand_queue_max_size, - Error::::QueueFull - ); - match location { - QueuePushDirection::Back => queue.push_back(order), - QueuePushDirection::Front => queue.push_front(order), - }; - Ok(()) - }) + ) { + let idx = match location { + QueuePushDirection::Back => queue_status.push_back(), + QueuePushDirection::Front => queue_status.push_front(), + }; + + let affinity = ParaIdAffinity::::get(para_id); + let order = EnqueuedOrder::new(idx, para_id); + #[cfg(test)] + log::debug!(target: LOG_TARGET, "add_on_demand_order, order: {:?}, affinity: {:?}, direction: {:?}", order, affinity, location); + + match affinity { + None => FreeEntries::::mutate(|entries| entries.push(order)), + Some(affinity) => + AffinityEntries::::mutate(affinity.core_index, |entries| entries.push(order)), + } } - /// Get the size of the on demand queue. + /// Decrease core affinity for para and update the queue. /// - /// Returns: - /// - The size of the on demand queue. - fn queue_size() -> u32 { - let config = >::config(); - match OnDemandQueue::::get().len().try_into() { - Ok(size) => return size, - Err(_) => { - log::debug!( - target: LOG_TARGET, - "Failed to fetch the on demand queue size, returning the max size." - ); - return config.scheduler_params.on_demand_queue_max_size - }, + /// If affinity dropped to 0, entries are moved back to `FreeEntries`. + fn decrease_affinity_update_queue(para_id: ParaId, core_index: CoreIndex) { + let affinity = Pallet::::decrease_affinity(para_id, core_index); + #[cfg(not(test))] + debug_assert_ne!( + affinity, None, + "Decreased affinity for a para that has not been served on a core?" + ); + if affinity != Some(0) { + return } - } - - /// Getter for the order queue. - #[cfg(test)] - fn get_queue() -> VecDeque { - OnDemandQueue::::get() - } - - /// Getter for the affinity tracker. - pub fn get_affinity_map(para_id: ParaId) -> Option { - ParaIdAffinity::::get(para_id) + // No more affinity for entries on this core, so free any remaining entries: + // + // This is necessary to ensure they still get served, as the core might no longer exist at all. + AffinityEntries::::mutate(core_index, |affinity_entries| { + FreeEntries::::mutate(|free_entries| { + let (mut freed, affinities): (BinaryHeap<_>, BinaryHeap<_>) = + take(affinity_entries).into_iter().partition(|e| e.para_id == para_id); + free_entries.append(&mut freed); + *affinity_entries = affinities; + }) + }); } /// Decreases the affinity of a `ParaId` to a specified `CoreIndex`. - /// Subtracts from the count of the `CoreAffinityCount` if an entry is found and the core_idx + /// + /// Subtracts from the count of the `CoreAffinityCount` if an entry is found and the core_index /// matches. When the count reaches 0, the entry is removed. /// A non-existent entry is a no-op. - fn decrease_affinity(para_id: ParaId, core_idx: CoreIndex) { + /// + /// Returns: The new affinity of the para on that core. `None` if there is no affinity on this + /// core.
+ fn decrease_affinity(para_id: ParaId, core_index: CoreIndex) -> Option { ParaIdAffinity::::mutate(para_id, |maybe_affinity| { - if let Some(affinity) = maybe_affinity { - if affinity.core_idx == core_idx { - let new_count = affinity.count.saturating_sub(1); - if new_count > 0 { - *maybe_affinity = Some(CoreAffinityCount { core_idx, count: new_count }); - } else { - *maybe_affinity = None; - } + let affinity = maybe_affinity.as_mut()?; + if affinity.core_index == core_index { + let new_count = affinity.count.saturating_sub(1); + if new_count > 0 { + *maybe_affinity = Some(CoreAffinityCount { core_index, count: new_count }); + } else { + *maybe_affinity = None; } + return Some(new_count) + } else { + None } - }); + }) } /// Increases the affinity of a `ParaId` to a specified `CoreIndex`. - /// Adds to the count of the `CoreAffinityCount` if an entry is found and the core_idx matches. - /// A non-existant entry will be initialized with a count of 1 and uses the supplied + /// Adds to the count of the `CoreAffinityCount` if an entry is found and the core_index + /// matches. A non-existent entry will be initialized with a count of 1 and uses the supplied /// `CoreIndex`. - fn increase_affinity(para_id: ParaId, core_idx: CoreIndex) { + fn increase_affinity(para_id: ParaId, core_index: CoreIndex) { ParaIdAffinity::::mutate(para_id, |maybe_affinity| match maybe_affinity { Some(affinity) => - if affinity.core_idx == core_idx { + if affinity.core_index == core_index { *maybe_affinity = Some(CoreAffinityCount { - core_idx, + core_index, count: affinity.count.saturating_add(1), }); }, None => { - *maybe_affinity = Some(CoreAffinityCount { core_idx, count: 1 }); + *maybe_affinity = Some(CoreAffinityCount { core_index, count: 1 }); }, }) } -} -impl Pallet { - /// Take the next queued entry that is available for a given core index. - /// Invalidates and removes orders with a `para_id` that is not `ParaLifecycle::Parathread` - /// but only in [0..P] range slice of the order queue, where P is the element that is - /// removed from the order queue. - /// - /// Parameters: - /// - `core_idx`: The core index - pub fn pop_assignment_for_core(core_idx: CoreIndex) -> Option { - let mut queue: VecDeque = OnDemandQueue::::get(); - - let mut invalidated_para_id_indexes: Vec = vec![]; - - // Get the position of the next `ParaId`. Select either a valid `ParaId` that has an - // affinity to the same `CoreIndex` as the scheduler asks for or a valid `ParaId` with no - // affinity at all. - let pos = queue.iter().enumerate().position(|(index, assignment)| { - if >::is_parathread(assignment.para_id) { - match ParaIdAffinity::::get(&assignment.para_id) { - Some(affinity) => return affinity.core_idx == core_idx, - None => return true, - } - } - // Record no longer valid para_ids. - invalidated_para_id_indexes.push(index); - return false - }); + /// Getter for the affinity tracker. + #[cfg(test)] + fn get_affinity_map(para_id: ParaId) -> Option { + ParaIdAffinity::::get(para_id) + } - // Collect the popped value. - let popped = pos.and_then(|p: usize| { - if let Some(assignment) = queue.remove(p) { - Pallet::::increase_affinity(assignment.para_id, core_idx); - return Some(assignment) - }; - None - }); + /// Getter for the affinity entries. + #[cfg(test)] + fn get_affinity_entries(core_index: CoreIndex) -> BinaryHeap { + AffinityEntries::::get(core_index) + } - // Only remove the invalid indexes *after* using the index. - // Removed in reverse order so that the indexes don't shift. 
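The affinity pair of helpers above is a reference-counted pin of a para to at most one core. A toy model of that bookkeeping, assuming a plain map in place of the `ParaIdAffinity` storage map (names illustrative):

    use std::collections::HashMap;

    /// (core, count) per para; stands in for `ParaIdAffinity` + `CoreAffinityCount`.
    type Affinity = HashMap<u32, (u32, u32)>;

    /// Mirrors `increase_affinity`: pin on first use, bump the count on a
    /// matching core, and ignore a mismatching core (already pinned elsewhere).
    fn increase(aff: &mut Affinity, para: u32, core: u32) {
        match aff.get_mut(&para) {
            Some((c, n)) if *c == core => *n += 1,
            Some(_) => {} // pinned to a different core: no-op
            None => { aff.insert(para, (core, 1)); },
        }
    }

    /// Mirrors `decrease_affinity`: returns the remaining count on a match,
    /// removing the pin entirely when it reaches zero, and `None` otherwise.
    fn decrease(aff: &mut Affinity, para: u32, core: u32) -> Option<u32> {
        let (c, n) = aff.get_mut(&para)?;
        if *c != core {
            return None;
        }
        *n = n.saturating_sub(1);
        let left = *n;
        if left == 0 {
            aff.remove(&para);
        }
        Some(left)
    }

`decrease_affinity_update_queue` then uses the returned count hitting zero as its trigger to move the para's remaining `AffinityEntries` back into `FreeEntries`.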
- invalidated_para_id_indexes.iter().rev().for_each(|idx| { - queue.remove(*idx); - }); + /// Getter for the free entries. + #[cfg(test)] + fn get_free_entries() -> BinaryHeap { + FreeEntries::::get() + } - // Write changes to storage. - OnDemandQueue::::set(queue); + #[cfg(feature = "runtime-benchmarks")] + pub fn populate_queue(para_id: ParaId, num: u32) { + QueueStatus::::mutate(|queue_status| { + for _ in 0..num { + Pallet::::add_on_demand_order(queue_status, para_id, QueuePushDirection::Back); + } + }); + } - popped.map(|p| Assignment::Pool { para_id: p.para_id, core_index: core_idx }) + #[cfg(test)] + fn set_queue_status(new_status: QueueStatusType) { + QueueStatus::::set(new_status); } - /// Report that the `para_id` & `core_index` combination was processed. - pub fn report_processed(para_id: ParaId, core_index: CoreIndex) { - Pallet::::decrease_affinity(para_id, core_index) + #[cfg(test)] + fn get_queue_status() -> QueueStatusType { + QueueStatus::::get() } - /// Push an assignment back to the front of the queue. - /// - /// The assignment has not been processed yet. Typically used on session boundaries. - /// Parameters: - /// - `assignment`: The on demand assignment. - pub fn push_back_assignment(para_id: ParaId, core_index: CoreIndex) { - Pallet::::decrease_affinity(para_id, core_index); - // Skip the queue on push backs from scheduler - match Pallet::::add_on_demand_order( - EnqueuedOrder::new(para_id), - QueuePushDirection::Front, - ) { - Ok(_) => {}, - Err(_) => {}, - } + #[cfg(test)] + fn get_traffic_default_value() -> FixedU128 { + ::TrafficDefaultValue::get() } } diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index a3c9f91ea426..3720f618b542 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -73,11 +73,24 @@ fn run_to_block( Paras::initializer_initialize(b + 1); Scheduler::initializer_initialize(b + 1); + // We need to update the spot traffic on every block. + OnDemandAssigner::on_initialize(b + 1); + // In the real runtime this is expected to be called by the `InclusionInherent` pallet. Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), b + 1); } } +fn place_order(para_id: ParaId) { + let alice = 100u64; + let amt = 10_000_000u128; + + Balances::make_free_balance_be(&alice, amt); + + run_to_block(101, |n| if n == 101 { Some(Default::default()) } else { None }); + OnDemandAssigner::place_order_allow_death(RuntimeOrigin::signed(alice), amt, para_id).unwrap() +} + #[test] fn spot_traffic_capacity_zero_returns_none() { match OnDemandAssigner::calculate_spot_traffic( @@ -201,6 +214,42 @@ fn spot_traffic_decreases_over_time() { assert_eq!(traffic, FixedU128::from_inner(3_125_000_000_000_000_000u128)) } +#[test] +fn spot_traffic_decreases_between_idle_blocks() { + // Testing spot traffic assumptions, but using the mock runtime and default on demand + // configuration values. Ensuring that blocks with no on demand activity (idle) + // decrease traffic. + + let para_id = ParaId::from(111); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // Initialize the parathread and wait for it to be ready. 
+ schedule_blank_para(para_id, ParaKind::Parathread); + assert!(!Paras::is_parathread(para_id)); + run_to_block(100, |n| if n == 100 { Some(Default::default()) } else { None }); + assert!(Paras::is_parathread(para_id)); + + // Set the spot traffic to a large number + OnDemandAssigner::set_queue_status(QueueStatusType { + traffic: FixedU128::from_u32(10), + ..Default::default() + }); + + assert_eq!(OnDemandAssigner::get_queue_status().traffic, FixedU128::from_u32(10)); + + // Run to block 101 and ensure that the traffic decreases. + run_to_block(101, |n| if n == 100 { Some(Default::default()) } else { None }); + assert!(OnDemandAssigner::get_queue_status().traffic < FixedU128::from_u32(10)); + + // Run to block 102 and observe that we've hit the default traffic value. + run_to_block(102, |n| if n == 100 { Some(Default::default()) } else { None }); + assert_eq!( + OnDemandAssigner::get_queue_status().traffic, + OnDemandAssigner::get_traffic_default_value() + ); + }) +} + #[test] fn place_order_works() { let alice = 1u64; @@ -278,74 +327,6 @@ fn place_order_keep_alive_keeps_alive() { }); } -#[test] -fn add_on_demand_order_works() { - let para_a = ParaId::from(111); - let order = EnqueuedOrder::new(para_a); - - let mut genesis = GenesisConfigBuilder::default(); - genesis.on_demand_max_queue_size = 1; - new_test_ext(genesis.build()).execute_with(|| { - // Initialize the parathread and wait for it to be ready. - schedule_blank_para(para_a, ParaKind::Parathread); - - // `para_a` is not onboarded as a parathread yet. - assert_noop!( - OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back), - Error::::InvalidParaId - ); - - assert!(!Paras::is_parathread(para_a)); - run_to_block(100, |n| if n == 100 { Some(Default::default()) } else { None }); - assert!(Paras::is_parathread(para_a)); - - // `para_a` is now onboarded as a valid parathread. - assert_ok!(OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back)); - - // Max queue size is 1, queue should be full. 
- assert_noop!( - OnDemandAssigner::add_on_demand_order(order, QueuePushDirection::Back), - Error::::QueueFull - ); - }); -} - -#[test] -fn spotqueue_push_directions() { - new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - let para_a = ParaId::from(111); - let para_b = ParaId::from(222); - let para_c = ParaId::from(333); - - schedule_blank_para(para_a, ParaKind::Parathread); - schedule_blank_para(para_b, ParaKind::Parathread); - schedule_blank_para(para_c, ParaKind::Parathread); - - run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - - let order_a = EnqueuedOrder::new(para_a); - let order_b = EnqueuedOrder::new(para_b); - let order_c = EnqueuedOrder::new(para_c); - - assert_ok!(OnDemandAssigner::add_on_demand_order( - order_a.clone(), - QueuePushDirection::Front - )); - assert_ok!(OnDemandAssigner::add_on_demand_order( - order_b.clone(), - QueuePushDirection::Front - )); - - assert_ok!(OnDemandAssigner::add_on_demand_order( - order_c.clone(), - QueuePushDirection::Back - )); - - assert_eq!(OnDemandAssigner::queue_size(), 3); - assert_eq!(OnDemandAssigner::get_queue(), VecDeque::from(vec![order_b, order_a, order_c])) - }); -} - #[test] fn pop_assignment_for_core_works() { new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { @@ -356,51 +337,32 @@ fn pop_assignment_for_core_works() { run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - let order_a = EnqueuedOrder::new(para_a); - let order_b = EnqueuedOrder::new(para_b); - let assignment_a = Assignment::Pool { para_id: para_a, core_index: CoreIndex(0) }; - let assignment_b = Assignment::Pool { para_id: para_b, core_index: CoreIndex(1) }; - // Pop should return none with empty queue assert_eq!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)), None); // Add enough assignments to the order queue. 
for _ in 0..2 { - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - } - - // Queue should contain orders a, b, a, b - { - let queue: Vec = OnDemandQueue::::get().into_iter().collect(); - assert_eq!( - queue, - vec![order_a.clone(), order_b.clone(), order_a.clone(), order_b.clone()] - ); + place_order(para_a); + place_order(para_b); } // Popped assignments should be for the correct paras and cores assert_eq!( - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)), - Some(assignment_a.clone()) + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).map(|a| a.para_id()), + Some(para_a) ); assert_eq!( - OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)), - Some(assignment_b.clone()) + OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)).map(|a| a.para_id()), + Some(para_b) ); assert_eq!( - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)), - Some(assignment_a.clone()) + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).map(|a| a.para_id()), + Some(para_a) + ); + assert_eq!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)).map(|a| a.para_id()), + Some(para_b) ); - - // Queue should contain one left over order - { - let queue: Vec = OnDemandQueue::::get().into_iter().collect(); - assert_eq!(queue, vec![order_b.clone(),]); - } }); } @@ -414,28 +376,19 @@ fn push_back_assignment_works() { run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - let order_a = EnqueuedOrder::new(para_a); - let order_b = EnqueuedOrder::new(para_b); - // Add enough assignments to the order queue. - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); + place_order(para_a); + place_order(para_b); // Pop order a - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)); + assert_eq!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).unwrap().para_id(), + para_a + ); // Para a should have affinity for core 0 assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); - assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, CoreIndex(0)); - - // Queue should still contain order b - { - let queue: Vec = OnDemandQueue::::get().into_iter().collect(); - assert_eq!(queue, vec![order_b.clone()]); - } + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_index, CoreIndex(0)); // Push back order a OnDemandAssigner::push_back_assignment(para_a, CoreIndex(0)); @@ -444,10 +397,82 @@ fn push_back_assignment_works() { assert_eq!(OnDemandAssigner::get_affinity_map(para_a).is_none(), true); // Queue should contain orders a, b. A in front of b. 
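The push-back round trip in this test follows the provider contract spelled out in the new doc comments earlier in the diff: every successful pop must be answered by exactly one call to `report_processed` or `push_back_assignment`. A hypothetical caller, shown as a sketch only (the closure `was_served` is illustrative; the pallet functions are the ones added above, with `OnDemandAssigner` being the test-runtime alias used throughout these tests):

    fn drive_core(core: CoreIndex, was_served: impl Fn(ParaId) -> bool) {
        if let Some(assignment) = OnDemandAssigner::pop_assignment_for_core(core) {
            let para = assignment.para_id();
            if was_served(para) {
                // Served: release one unit of affinity and drop the entry for good.
                OnDemandAssigner::report_processed(para, core);
            } else {
                // Not served (e.g. session boundary): release affinity and
                // re-queue the order at the front.
                OnDemandAssigner::push_back_assignment(para, core);
            }
        }
    }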
- { - let queue: Vec = OnDemandQueue::::get().into_iter().collect(); - assert_eq!(queue, vec![order_a.clone(), order_b.clone()]); + assert_eq!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).unwrap().para_id(), + para_a + ); + assert_eq!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).unwrap().para_id(), + para_b + ); + }); +} + +#[test] +fn affinity_prohibits_parallel_scheduling() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let para_a = ParaId::from(111); + let para_b = ParaId::from(222); + + schedule_blank_para(para_a, ParaKind::Parathread); + schedule_blank_para(para_b, ParaKind::Parathread); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + // There should be no affinity before starting. + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + assert!(OnDemandAssigner::get_affinity_map(para_b).is_none()); + + // Add 2 assignments for para_a for every para_b. + place_order(para_a); + place_order(para_a); + place_order(para_b); + + // Approximate having 1 core. + for _ in 0..3 { + assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).is_some()); } + assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).is_none()); + + // Affinity on one core is meaningless. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 2); + assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1); + assert_eq!( + OnDemandAssigner::get_affinity_map(para_a).unwrap().core_index, + OnDemandAssigner::get_affinity_map(para_b).unwrap().core_index, + ); + + // Clear affinity + OnDemandAssigner::report_processed(para_a, 0.into()); + OnDemandAssigner::report_processed(para_a, 0.into()); + OnDemandAssigner::report_processed(para_b, 0.into()); + + // Add 2 assignments for para_a for every para_b. + place_order(para_a); + place_order(para_a); + place_order(para_b); + + // Approximate having 3 cores. CoreIndex 2 should be unable to obtain an assignment + for _ in 0..3 { + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)); + OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)); + assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(2)).is_none()); + } + + // Affinity should be the same as before, but on different cores. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 2); + assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1); + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_index, CoreIndex(0)); + assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().core_index, CoreIndex(1)); + + // Clear affinity + OnDemandAssigner::report_processed(para_a, CoreIndex(0)); + OnDemandAssigner::report_processed(para_a, CoreIndex(0)); + OnDemandAssigner::report_processed(para_b, CoreIndex(1)); + + // There should be no affinity after clearing. + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + assert!(OnDemandAssigner::get_affinity_map(para_b).is_none()); }); } @@ -458,7 +483,6 @@ fn affinity_changes_work() { let core_index = CoreIndex(0); schedule_blank_para(para_a, ParaKind::Parathread); - let order_a = EnqueuedOrder::new(para_a); run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); // There should be no affinity before starting. @@ -466,8 +490,7 @@ fn affinity_changes_work() { // Add enough assignments to the order queue. 
for _ in 0..10 { - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Front) - .expect("Invalid paraid or queue full"); + place_order(para_a); } // There should be no affinity before the scheduler pops. @@ -483,7 +506,6 @@ fn affinity_changes_work() { // Affinity count is 1 after popping with a previous para. assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); - assert_eq!(OnDemandAssigner::queue_size(), 8); for _ in 0..3 { OnDemandAssigner::pop_assignment_for_core(core_index); @@ -491,147 +513,197 @@ fn affinity_changes_work() { // Affinity count is 4 after popping 3 times without a previous para. assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 4); - assert_eq!(OnDemandAssigner::queue_size(), 5); for _ in 0..5 { OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::pop_assignment_for_core(core_index); + assert!(OnDemandAssigner::pop_assignment_for_core(core_index).is_some()); } // Affinity count should still be 4 but queue should be empty. + assert!(OnDemandAssigner::pop_assignment_for_core(core_index).is_none()); assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 4); - assert_eq!(OnDemandAssigner::queue_size(), 0); // Pop 4 times and get to exactly 0 (None) affinity. for _ in 0..4 { OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::pop_assignment_for_core(core_index); + assert!(OnDemandAssigner::pop_assignment_for_core(core_index).is_none()); } assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); // Decreasing affinity beyond 0 should still be None. OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::pop_assignment_for_core(core_index); + assert!(OnDemandAssigner::pop_assignment_for_core(core_index).is_none()); assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); }); } #[test] -fn affinity_prohibits_parallel_scheduling() { - new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - let para_a = ParaId::from(111); - let para_b = ParaId::from(222); +fn new_affinity_for_a_core_must_come_from_free_entries() { + // If affinity count for a core was zero before, and is 1 now, then the entry + // must have come from free_entries. + let parachains = + vec![ParaId::from(111), ParaId::from(222), ParaId::from(333), ParaId::from(444)]; + let core_indices = vec![CoreIndex(0), CoreIndex(1), CoreIndex(2), CoreIndex(3)]; - schedule_blank_para(para_a, ParaKind::Parathread); - schedule_blank_para(para_b, ParaKind::Parathread); + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + parachains.iter().for_each(|chain| { + schedule_blank_para(*chain, ParaKind::Parathread); + }); run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - let order_a = EnqueuedOrder::new(para_a); - let order_b = EnqueuedOrder::new(para_b); - - // There should be no affinity before starting. - assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); - assert!(OnDemandAssigner::get_affinity_map(para_b).is_none()); - - // Add 2 assignments for para_a for every para_b. 
- OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - assert_eq!(OnDemandAssigner::queue_size(), 3); + // Place orders for all chains. + parachains.iter().for_each(|chain| { + place_order(*chain); + }); + + // There are 4 entries in free_entries. + let start_free_entries = OnDemandAssigner::get_free_entries().len(); + assert_eq!(start_free_entries, 4); + + // Pop assignments on all cores. + core_indices.iter().enumerate().for_each(|(n, core_index)| { + // There is no affinity on the core prior to popping. + assert!(OnDemandAssigner::get_affinity_entries(*core_index).is_empty()); + + // There's always an order to be popped for each core. + let free_entries = OnDemandAssigner::get_free_entries(); + let next_order = free_entries.peek(); + + // There is no affinity on the paraid prior to popping. + assert!(OnDemandAssigner::get_affinity_map(next_order.unwrap().para_id).is_none()); + + match OnDemandAssigner::pop_assignment_for_core(*core_index) { + Some(assignment) => { + // The popped assignment came from free entries. + assert_eq!( + start_free_entries - 1 - n, + OnDemandAssigner::get_free_entries().len() + ); + // The popped assignment has the same para id as the next order. + assert_eq!(assignment.para_id(), next_order.unwrap().para_id); + }, + None => panic!("Should not happen"), + } + }); - // Approximate having 1 core. - for _ in 0..3 { - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)); - } + // All entries have been removed from free_entries. + assert!(OnDemandAssigner::get_free_entries().is_empty()); - // Affinity on one core is meaningless. - assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 2); - assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1); - assert_eq!( - OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, - OnDemandAssigner::get_affinity_map(para_b).unwrap().core_idx - ); - - // Clear affinity - OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::report_processed(para_b, 0.into()); - - // Add 2 assignments for para_a for every para_b. - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); + // All chains have an affinity count of 1. + parachains.iter().for_each(|chain| { + assert_eq!(OnDemandAssigner::get_affinity_map(*chain).unwrap().count, 1); + }); + }); +} - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); +#[test] +#[should_panic] +fn queue_index_ordering_is_unsound_over_max_size() { + // NOTE: Unsoundness proof. If the number goes sufficiently over the max_queue_max_size + // the overflow will cause an opposite comparison to what would be expected. + let max_num = u32::MAX - ON_DEMAND_MAX_QUEUE_MAX_SIZE; + // 0 < some large number. + assert_eq!(QueueIndex(0).cmp(&QueueIndex(max_num + 1)), Ordering::Less); +} - OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); +#[test] +fn queue_index_ordering_works() { + // The largest accepted queue size. 
+ let max_num = ON_DEMAND_MAX_QUEUE_MAX_SIZE; + + // 0 == 0 + assert_eq!(QueueIndex(0).cmp(&QueueIndex(0)), Ordering::Equal); + // 0 < 1 + assert_eq!(QueueIndex(0).cmp(&QueueIndex(1)), Ordering::Less); + // 1 > 0 + assert_eq!(QueueIndex(1).cmp(&QueueIndex(0)), Ordering::Greater); + // 0 < max_num + assert_eq!(QueueIndex(0).cmp(&QueueIndex(max_num)), Ordering::Less); + // 0 < max_num + 1 as well; see the unsoundness test above for where this flips. + assert_eq!(QueueIndex(0).cmp(&QueueIndex(max_num + 1)), Ordering::Less); + + // Ordering within the bounds of ON_DEMAND_MAX_QUEUE_MAX_SIZE works. + let mut v = vec![3, 6, 2, 1, 5, 4]; + v.sort_by_key(|&num| QueueIndex(num)); + assert_eq!(v, vec![1, 2, 3, 4, 5, 6]); + + v = vec![max_num, 4, 5, 1, 6]; + v.sort_by_key(|&num| QueueIndex(num)); + assert_eq!(v, vec![1, 4, 5, 6, max_num]); + + // Ordering with an element outside of the bounds of the max size also works. + v = vec![max_num + 2, 0, 6, 2, 1, 5, 4]; + v.sort_by_key(|&num| QueueIndex(num)); + assert_eq!(v, vec![0, 1, 2, 4, 5, 6, max_num + 2]); + + // Numbers way above the max size will overflow + v = vec![u32::MAX - 1, u32::MAX, 6, 2, 1, 5, 4]; + v.sort_by_key(|&num| QueueIndex(num)); + assert_eq!(v, vec![u32::MAX - 1, u32::MAX, 1, 2, 4, 5, 6]); +} - // Approximate having 3 cores. CoreIndex 2 should be unable to obtain an assignment - for _ in 0..3 { - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)); - OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)); - assert_eq!(None, OnDemandAssigner::pop_assignment_for_core(CoreIndex(2))); - } +#[test] +fn reverse_queue_index_does_reverse() { + let mut v = vec![1, 2, 3, 4, 5, 6]; - // Affinity should be the same as before, but on different cores. - assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 2); - assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1); - assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, CoreIndex(0)); - assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().core_idx, CoreIndex(1)); + // Basic reversal of a vector. + v.sort_by_key(|&num| ReverseQueueIndex(num)); + assert_eq!(v, vec![6, 5, 4, 3, 2, 1]); - // Clear affinity - OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::report_processed(para_b, 1.into()); + // Example from rust docs on `Reverse`. Should work identically. + v.sort_by_key(|&num| (num > 3, ReverseQueueIndex(num))); + assert_eq!(v, vec![3, 2, 1, 6, 5, 4]); - // There should be no affinity after clearing. - assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); - assert!(OnDemandAssigner::get_affinity_map(para_b).is_none()); - }); + let mut v2 = vec![1, 2, u32::MAX]; + v2.sort_by_key(|&num| ReverseQueueIndex(num)); + assert_eq!(v2, vec![2, 1, u32::MAX]); } #[test] -fn on_demand_orders_cannot_be_popped_if_lifecycle_changes() { - let para_id = ParaId::from(10); - let core_index = CoreIndex(0); - let order = EnqueuedOrder::new(para_id); +fn queue_status_size_fn_works() { + // Add orders to the on demand queue, and make sure that they are properly represented + // by the QueueStatusType::size fn.
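For readers of the two ordering tests above, here is an `Ord` implementation that is consistent with every assertion, including the deliberate `should_panic` case. The constant's value and the exact body are assumptions; only the test vectors above are authoritative:

    use core::cmp::Ordering;

    /// Value assumed for this sketch; the real constant lives in `primitives`.
    const ON_DEMAND_MAX_QUEUE_MAX_SIZE: u32 = 1_000_000_000;

    #[derive(Clone, Copy, PartialEq, Eq)]
    struct QueueIndex(u32);

    impl Ord for QueueIndex {
        fn cmp(&self, other: &Self) -> Ordering {
            // Wrapping distance: only coherent while live indices span at most
            // the window, which is exactly what the unsoundness test probes.
            let diff = self.0.wrapping_sub(other.0);
            if diff == 0 {
                Ordering::Equal
            } else if diff <= ON_DEMAND_MAX_QUEUE_MAX_SIZE {
                Ordering::Greater
            } else {
                Ordering::Less
            }
        }
    }

    impl PartialOrd for QueueIndex {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            Some(self.cmp(other))
        }
    }

This is also why `ON_DEMAND_MAX_QUEUE_MAX_SIZE` caps `on_demand_queue_max_size` (see the new `OnDemandQueueSizeTooLarge` consistency check later in this diff): bounding the queue keeps live indices inside the sound comparison window.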
+ let parachains = vec![ParaId::from(111), ParaId::from(222), ParaId::from(333)]; + let core_indices = vec![CoreIndex(0), CoreIndex(1)]; new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - // Register the para_id as a parathread - schedule_blank_para(para_id, ParaKind::Parathread); - - assert!(!Paras::is_parathread(para_id)); - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); - assert!(Paras::is_parathread(para_id)); + parachains.iter().for_each(|chain| { + schedule_blank_para(*chain, ParaKind::Parathread); + }); - // Add two assignments for a para_id with a valid lifecycle. - assert_ok!(OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back)); - assert_ok!(OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back)); + assert_eq!(OnDemandAssigner::get_queue_status().size(), 0); - // First pop is fine - assert!( - OnDemandAssigner::pop_assignment_for_core(core_index) == - Some(Assignment::Pool { para_id, core_index }) - ); + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - // Deregister para - assert_ok!(Paras::schedule_para_cleanup(para_id)); + // Place orders for all chains. + parachains.iter().for_each(|chain| { + // 2 per chain for a total of 6 + place_order(*chain); + place_order(*chain); + }); - // Run to new session and verify that para_id is no longer a valid parathread. - assert!(Paras::is_parathread(para_id)); - run_to_block(20, |n| if n == 20 { Some(Default::default()) } else { None }); - assert!(!Paras::is_parathread(para_id)); + // 6 orders in free entries + assert_eq!(OnDemandAssigner::get_free_entries().len(), 6); + // 6 orders via queue status size + assert_eq!( + OnDemandAssigner::get_free_entries().len(), + OnDemandAssigner::get_queue_status().size() as usize + ); - // Second pop should be None. - OnDemandAssigner::report_processed(para_id, core_index); - assert_eq!(OnDemandAssigner::pop_assignment_for_core(core_index), None); + core_indices.iter().for_each(|core_index| { + OnDemandAssigner::pop_assignment_for_core(*core_index); + }); + + // There should be 2 orders in the scheduler's claimqueue, + // 2 in assorted AffinityMaps and 2 in free. + // ParaId 111 + assert_eq!(OnDemandAssigner::get_affinity_entries(core_indices[0]).len(), 1); + // ParaId 222 + assert_eq!(OnDemandAssigner::get_affinity_entries(core_indices[1]).len(), 1); + // Free entries are from ParaId 333 + assert_eq!(OnDemandAssigner::get_free_entries().len(), 2); + // For a total size of 4. 
+ assert_eq!(OnDemandAssigner::get_queue_status().size(), 4) }); } diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index f06b44e3ca23..e5ee805b0076 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -23,18 +23,18 @@ use crate::{ }; #[cfg(not(feature = "std"))] use alloc::{vec, vec::Vec}; -use alloc::{collections::{btree_map::BTreeMap, vec_deque::VecDeque}}; +use alloc::{collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}}; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use primitives::{ - collator_signature_payload, vstaging::node_features::FeatureIndex, AvailabilityBitfield, - BackedCandidate, CandidateCommitments, CandidateDescriptor, CandidateHash, CollatorId, - CollatorSignature, CommittedCandidateReceipt, CompactStatement, CoreIndex, DisputeStatement, - DisputeStatementSet, GroupIndex, HeadData, Id as ParaId, IndexedVec, - InherentData as ParachainsInherentData, InvalidDisputeStatementKind, PersistedValidationData, - SessionIndex, SigningContext, UncheckedSigned, ValidDisputeStatementKind, ValidationCode, - ValidatorId, ValidatorIndex, ValidityAttestation, + collator_signature_payload, node_features::FeatureIndex, AvailabilityBitfield, BackedCandidate, + CandidateCommitments, CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, + CommittedCandidateReceipt, CompactStatement, CoreIndex, DisputeStatement, DisputeStatementSet, + GroupIndex, HeadData, Id as ParaId, IndexedVec, InherentData as ParachainsInherentData, + InvalidDisputeStatementKind, PersistedValidationData, SessionIndex, SigningContext, + UncheckedSigned, ValidDisputeStatementKind, ValidationCode, ValidatorId, ValidatorIndex, + ValidityAttestation, }; use sp_core::{sr25519, H256}; use sp_runtime::{ @@ -102,6 +102,8 @@ pub(crate) struct BenchBuilder { code_upgrade: Option, /// Specifies whether the claimqueue should be filled. fill_claimqueue: bool, + /// Cores which should not be available when being populated with pending candidates. + unavailable_cores: Vec, _phantom: core::marker::PhantomData, } @@ -131,6 +133,7 @@ impl BenchBuilder { elastic_paras: Default::default(), code_upgrade: None, fill_claimqueue: true, + unavailable_cores: vec![], _phantom: core::marker::PhantomData::, } } @@ -147,6 +150,12 @@ impl BenchBuilder { self } + /// Set the cores which should not be available when being populated with pending candidates. + pub(crate) fn set_unavailable_cores(mut self, unavailable_cores: Vec) -> Self { + self.unavailable_cores = unavailable_cores; + self + } + /// Set a map from para id seed to number of validity votes. pub(crate) fn set_backed_and_concluding_paras( mut self, @@ -157,7 +166,6 @@ impl BenchBuilder { } /// Set a map from para id seed to number of cores assigned to it. 
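As a usage note for the `BenchBuilder` changes above, a hypothetical benchmark setup might look like the sketch below. The setters and `build()` come from this file, while the mock runtime name `Test`, the `Bench` return type, and the concrete numbers are assumptions for illustration:

    // Five paras with five validity votes each; cores 0 and 1 are withheld
    // from availability so their candidates stay pending.
    fn scenario_with_unavailable_cores() -> Bench<Test> {
        BenchBuilder::<Test>::new()
            .set_backed_and_concluding_paras((0..5u32).map(|i| (i, 5u32)).collect())
            .set_unavailable_cores(vec![0, 1])
            .build()
    }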
- #[cfg(feature = "runtime-benchmarks")] pub(crate) fn set_elastic_paras(mut self, elastic_paras: BTreeMap) -> Self { self.elastic_paras = elastic_paras; self @@ -270,7 +278,7 @@ impl BenchBuilder { persisted_validation_data_hash: Default::default(), pov_hash: Default::default(), erasure_root: Default::default(), - signature: CollatorSignature::from(sr25519::Signature([42u8; 64])), + signature: CollatorSignature::from(sr25519::Signature::from_raw([42u8; 64])), para_head: Default::default(), validation_code_hash: mock_validation_code().hash(), } @@ -282,11 +290,13 @@ core_idx: CoreIndex, candidate_hash: CandidateHash, availability_votes: BitVec, + commitments: CandidateCommitments, ) -> inclusion::CandidatePendingAvailability> { inclusion::CandidatePendingAvailability::>::new( core_idx, // core candidate_hash, // hash Self::candidate_descriptor_mock(), // candidate descriptor + commitments, // commitments availability_votes, // availability votes Default::default(), // backers Zero::zero(), // relay parent @@ -307,12 +317,6 @@ availability_votes: BitVec, candidate_hash: CandidateHash, ) { - let candidate_availability = Self::candidate_availability_mock( - group_idx, - core_idx, - candidate_hash, - availability_votes, - ); let commitments = CandidateCommitments:: { upward_messages: Default::default(), horizontal_messages: Default::default(), @@ -321,16 +325,29 @@ processed_downward_messages: 0, hrmp_watermark: 0u32.into(), }; - inclusion::PendingAvailability::::insert(para_id, candidate_availability); - inclusion::PendingAvailabilityCommitments::::insert(&para_id, commitments); + let candidate_availability = Self::candidate_availability_mock( + group_idx, + core_idx, + candidate_hash, + availability_votes, + commitments, + ); + inclusion::PendingAvailability::::mutate(para_id, |maybe_candidates| { + if let Some(candidates) = maybe_candidates { + candidates.push_back(candidate_availability); + } else { + *maybe_candidates = + Some([candidate_availability].into_iter().collect::>()); + } + }); } /// Create an `AvailabilityBitfield` where `concluding` is a map where each key is a core index /// that is concluding and `cores` is the total number of cores in the system. - fn availability_bitvec(concluding: &BTreeMap, cores: usize) -> AvailabilityBitfield { + fn availability_bitvec(concluding_cores: &BTreeSet, cores: usize) -> AvailabilityBitfield { let mut bitfields = bitvec::bitvec![u8, bitvec::order::Lsb0; 0; 0]; for i in 0..cores { - if concluding.get(&(i as u32)).is_some() { + if concluding_cores.contains(&(i as u32)) { bitfields.push(true); } else { bitfields.push(false) @@ -354,13 +371,13 @@ } } - /// Register `cores` count of parachains. + /// Register `n_paras` count of parachains. /// /// Note that this must be called at least 2 sessions before the target session as there is a /// n+2 session delay for the scheduled actions to take effect. - fn setup_para_ids(cores: usize) { + fn setup_para_ids(n_paras: usize) { // make sure parachains exist prior to session change.
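The reworked `availability_bitvec` above now takes the set of concluding cores directly, and stripped of the `bitvec` machinery the logic is one bit per core. A plain-Rust model, with `Vec<bool>` standing in for `AvailabilityBitfield`:

    use std::collections::BTreeSet;

    /// One bit per core, set iff that core's candidate should be made available.
    fn availability_bits(concluding_cores: &BTreeSet<u32>, cores: usize) -> Vec<bool> {
        (0..cores as u32).map(|i| concluding_cores.contains(&i)).collect()
    }

    #[test]
    fn skips_unavailable_cores() {
        // Cores 0 and 2 conclude; core 1 (e.g. listed in `unavailable_cores`)
        // keeps its candidate pending availability.
        let concluding: BTreeSet<u32> = [0, 2].into();
        assert_eq!(availability_bits(&concluding, 3), vec![true, false, true]);
    }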
- for i in 0..cores { + for i in 0..n_paras { let para_id = ParaId::from(i as u32); let validation_code = mock_validation_code(); @@ -470,24 +487,8 @@ impl BenchBuilder { let validators = self.validators.as_ref().expect("must have some validators prior to calling"); - let availability_bitvec = Self::availability_bitvec(concluding_paras, total_cores); - - let bitfields: Vec> = validators - .iter() - .enumerate() - .map(|(i, public)| { - let unchecked_signed = UncheckedSigned::::benchmark_sign( - public, - availability_bitvec.clone(), - &self.signing_context(), - ValidatorIndex(i as u32), - ); - - unchecked_signed - }) - .collect(); - let mut current_core_idx = 0u32; + let mut concluding_cores = BTreeSet::new(); for (seed, _) in concluding_paras.iter() { // make sure the candidates that will be concluding are marked as pending availability. @@ -503,13 +504,34 @@ impl BenchBuilder { para_id, core_idx, group_idx, - Self::validator_availability_votes_yes(validators.len()), + // No validators have made this candidate available yet. + bitvec::bitvec![u8, bitvec::order::Lsb0; 0; validators.len()], CandidateHash(H256::from(byte32_slice_from(current_core_idx))), ); + if !self.unavailable_cores.contains(¤t_core_idx) { + concluding_cores.insert(current_core_idx); + } current_core_idx += 1; } } + let availability_bitvec = Self::availability_bitvec(&concluding_cores, total_cores); + + let bitfields: Vec> = validators + .iter() + .enumerate() + .map(|(i, public)| { + let unchecked_signed = UncheckedSigned::::benchmark_sign( + public, + availability_bitvec.clone(), + &self.signing_context(), + ValidatorIndex(i as u32), + ); + + unchecked_signed + }) + .collect(); + bitfields } @@ -520,7 +542,7 @@ impl BenchBuilder { /// validity votes. fn create_backed_candidates( &self, - cores_with_backed_candidates: &BTreeMap, + paras_with_backed_candidates: &BTreeMap, elastic_paras: &BTreeMap, includes_code_upgrade: Option, ) -> Vec> { @@ -529,7 +551,7 @@ impl BenchBuilder { let config = configuration::Pallet::::config(); let mut current_core_idx = 0u32; - cores_with_backed_candidates + paras_with_backed_candidates .iter() .flat_map(|(seed, num_votes)| { assert!(*num_votes <= validators.len() as u32); @@ -758,7 +780,7 @@ impl BenchBuilder { // NOTE: there is an n+2 session delay for these actions to take effect. // We are currently in Session 0, so these changes will take effect in Session 2. - Self::setup_para_ids(used_cores); + Self::setup_para_ids(used_cores - extra_cores); configuration::ActiveConfig::::mutate(|c| { c.scheduler_params.num_cores = used_cores as u32; }); @@ -780,11 +802,11 @@ impl BenchBuilder { let disputes = builder.create_disputes( builder.backed_and_concluding_paras.len() as u32, - used_cores as u32, + (used_cores - extra_cores) as u32, builder.dispute_sessions.as_slice(), ); let mut disputed_cores = (builder.backed_and_concluding_paras.len() as u32.. - used_cores as u32) + ((used_cores - extra_cores) as u32)) .into_iter() .map(|idx| (idx, 0)) .collect::>(); @@ -792,7 +814,7 @@ impl BenchBuilder { let mut all_cores = builder.backed_and_concluding_paras.clone(); all_cores.append(&mut disputed_cores); - assert_eq!(inclusion::PendingAvailability::::iter().count(), used_cores as usize,); + assert_eq!(inclusion::PendingAvailability::::iter().count(), used_cores - extra_cores); // Mark all the used cores as occupied. 
We expect that there are // `backed_and_concluding_paras` that are pending availability and that there are @@ -829,7 +851,7 @@ impl BenchBuilder { .keys() .flat_map(|para_id| { (0..elastic_paras.get(¶_id).cloned().unwrap_or(1)) - .map(|_para_local_core_idx| { + .filter_map(|_para_local_core_idx| { let ttl = configuration::Pallet::::config().scheduler_params.ttl; // Load an assignment into provider so that one is present to pop let assignment = @@ -842,8 +864,13 @@ impl BenchBuilder { CoreIndex(core_idx), [ParasEntry::new(assignment, now + ttl)].into(), ); + let res = if builder.unavailable_cores.contains(&core_idx) { + None + } else { + Some(entry) + }; core_idx += 1; - entry + res }) .collect::>)>>() }) diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 389dd8cbe23d..b552e919a68a 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -26,9 +26,9 @@ use polkadot_parachain_primitives::primitives::{ MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM, }; use primitives::{ - vstaging::{ApprovalVotingParams, NodeFeatures}, - AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, SessionIndex, - LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, + ApprovalVotingParams, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, + NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, + MAX_POV_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; @@ -185,7 +185,7 @@ pub struct HostConfiguration { /// /// Must be at least 1. pub no_show_slots: u32, - /// The number of delay tranches in total. + /// The number of delay tranches in total. Must be at least 1. pub n_delay_tranches: u32, /// The width of the zeroth delay tranche for approval assignments. This many delay tranches /// beyond 0 are all consolidated to form a wide 0 tranche. @@ -245,7 +245,7 @@ impl> Default for HostConfiguration { InconsistentExecutorParams { inner: ExecutorParamError }, /// TTL should be bigger than lookahead LookaheadExceedsTTL, + /// Passed in queue size for on-demand was too large. + OnDemandQueueSizeTooLarge, + /// Number of delay tranches cannot be 0. + ZeroDelayTranches, } impl HostConfiguration @@ -404,6 +408,14 @@ where return Err(LookaheadExceedsTTL) } + if self.scheduler_params.on_demand_queue_max_size > ON_DEMAND_MAX_QUEUE_MAX_SIZE { + return Err(OnDemandQueueSizeTooLarge) + } + + if self.n_delay_tranches.is_zero() { + return Err(ZeroDelayTranches) + } + Ok(()) } @@ -629,7 +641,7 @@ pub mod pallet { /// Set the number of coretime execution cores. /// - /// Note that this configuration is managed by the coretime chain. Only manually change + /// NOTE: that this configuration is managed by the coretime chain. Only manually change /// this, if you really know what you are doing! #[pallet::call_index(6)] #[pallet::weight(( @@ -1132,6 +1144,7 @@ pub mod pallet { config.scheduler_params.on_demand_queue_max_size = new; }) } + /// Set the on demand (parathreads) fee variability. #[pallet::call_index(50)] #[pallet::weight(( diff --git a/polkadot/runtime/parachains/src/configuration/migration/v10.rs b/polkadot/runtime/parachains/src/configuration/migration/v10.rs index a87cfe963a62..6c594bec634f 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v10.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v10.rs @@ -17,17 +17,16 @@ //! 
A module that is responsible for migration of storage. use crate::configuration::{Config, Pallet}; +#[cfg(not(feature = "std"))] use alloc::vec::Vec; -use frame_support::{pallet_prelude::*, traits::Defensive, weights::Weight}; +use frame_support::{pallet_prelude::*, traits::{Defensive, UncheckedOnRuntimeUpgrade}, weights::Weight}; use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - vstaging::NodeFeatures, AsyncBackingParams, Balance, ExecutorParams, SessionIndex, + AsyncBackingParams, Balance, ExecutorParams, NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::Perbill; -use frame_support::traits::OnRuntimeUpgrade; - use super::v9::V9HostConfiguration; // All configuration of the runtime with respect to paras. #[derive(Clone, Encode, PartialEq, Decode, Debug)] @@ -163,7 +162,7 @@ mod v10 { } pub struct VersionUncheckedMigrateToV10(core::marker::PhantomData); -impl OnRuntimeUpgrade for VersionUncheckedMigrateToV10 { +impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV10 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV10"); diff --git a/polkadot/runtime/parachains/src/configuration/migration/v11.rs b/polkadot/runtime/parachains/src/configuration/migration/v11.rs index 33ada2222f47..5c7286331bb0 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v11.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v11.rs @@ -19,17 +19,18 @@ use crate::configuration::{self, Config, Pallet}; use alloc::vec::Vec; use frame_support::{ - migrations::VersionedMigration, pallet_prelude::*, traits::Defensive, weights::Weight, + migrations::VersionedMigration, + pallet_prelude::*, + traits::{Defensive, UncheckedOnRuntimeUpgrade}, + weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - vstaging::ApprovalVotingParams, AsyncBackingParams, ExecutorParams, SessionIndex, + ApprovalVotingParams, AsyncBackingParams, ExecutorParams, NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; -use frame_support::traits::OnRuntimeUpgrade; use polkadot_core_primitives::Balance; -use primitives::vstaging::NodeFeatures; use sp_arithmetic::Perbill; use super::v10::V10HostConfiguration; @@ -177,7 +178,7 @@ pub type MigrateToV11 = VersionedMigration< >; pub struct UncheckedMigrateToV11(core::marker::PhantomData); -impl OnRuntimeUpgrade for UncheckedMigrateToV11 { +impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV11 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV11"); diff --git a/polkadot/runtime/parachains/src/configuration/migration/v12.rs b/polkadot/runtime/parachains/src/configuration/migration/v12.rs index a57d6cc5782d..27c8d0d3b902 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v12.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v12.rs @@ -21,7 +21,7 @@ use alloc::vec::Vec; use frame_support::{ migrations::VersionedMigration, pallet_prelude::*, - traits::{Defensive, OnRuntimeUpgrade}, + traits::{Defensive, UncheckedOnRuntimeUpgrade}, }; use frame_system::pallet_prelude::BlockNumberFor; use primitives::vstaging::SchedulerParams; @@ -70,7 +70,7 @@ pub type MigrateToV12 = VersionedMigration< pub struct 
UncheckedMigrateToV12(core::marker::PhantomData); -impl OnRuntimeUpgrade for UncheckedMigrateToV12 { +impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV12 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV12"); diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs index 02bb387a3901..102e054d7a66 100644 --- a/polkadot/runtime/parachains/src/coretime/migration.rs +++ b/polkadot/runtime/parachains/src/coretime/migration.rs @@ -238,7 +238,7 @@ mod v_coretime { return None }, }; - // We assume the coretime chain set this parameter to the recommened value in RFC-1: + // We assume the coretime chain set this parameter to the recommended value in RFC-1: const TIME_SLICE_PERIOD: u32 = 80; let round_up = if valid_until % TIME_SLICE_PERIOD > 0 { 1 } else { 0 }; let time_slice = valid_until / TIME_SLICE_PERIOD + TIME_SLICE_PERIOD * round_up; diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index ac363628377e..b58fa92f5f2e 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -83,6 +83,8 @@ enum CoretimeCalls { SetLease(pallet_broker::TaskId, pallet_broker::Timeslice), #[codec(index = 19)] NotifyCoreCount(u16), + #[codec(index = 99)] + SwapLeases(ParaId, ParaId), } #[frame_support::pallet] @@ -233,6 +235,24 @@ impl Pallet { } } } + + // Handle legacy swaps in coretime. Notifies broker parachain that a lease swap has occurred via + // XCM message. This function is meant to be used in an implementation of `OnSwap` trait. 
+ pub fn on_legacy_lease_swap(one: ParaId, other: ParaId) { + let message = Xcm(vec![ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + mk_coretime_call(crate::coretime::CoretimeCalls::SwapLeases(one, other)), + ]); + if let Err(err) = send_xcm::( + Location::new(0, [Junction::Parachain(T::BrokerId::get())]), + message, + ) { + log::error!("Sending `SwapLeases` to coretime chain failed: {:?}", err); + } + } } impl OnNewSession> for Pallet { diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs index 6624e9319445..a663c9c91f2c 100644 --- a/polkadot/runtime/parachains/src/disputes.rs +++ b/polkadot/runtime/parachains/src/disputes.rs @@ -27,11 +27,11 @@ use frame_system::pallet_prelude::*; use parity_scale_codec::{Decode, Encode}; use polkadot_runtime_metrics::get_current_time; use primitives::{ - byzantine_threshold, supermajority_threshold, vstaging::ApprovalVoteMultipleCandidates, - ApprovalVote, CandidateHash, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, - CompactStatement, ConsensusLog, DisputeState, DisputeStatement, DisputeStatementSet, - ExplicitDisputeStatement, InvalidDisputeStatementKind, MultiDisputeStatementSet, SessionIndex, - SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, + byzantine_threshold, supermajority_threshold, ApprovalVote, ApprovalVoteMultipleCandidates, + CandidateHash, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CompactStatement, + ConsensusLog, DisputeState, DisputeStatement, DisputeStatementSet, ExplicitDisputeStatement, + InvalidDisputeStatementKind, MultiDisputeStatementSet, SessionIndex, SigningContext, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, }; use scale_info::TypeInfo; use sp_runtime::{ @@ -182,7 +182,7 @@ pub trait DisputesHandler { fn is_frozen() -> bool; /// Remove dispute statement duplicates and sort the non-duplicates based on - /// local (lower indicies) vs remotes (higher indices) and age (older with lower indices). + /// local (lower indices) vs remotes (higher indices) and age (older with lower indices). /// /// Returns `Ok(())` if no duplicates were present, `Err(())` otherwise. /// diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index cc232e98ba59..655b7fba55ad 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -643,7 +643,7 @@ fn is_known_offence( } } -/// Actual `HandleReports` implemention. +/// Actual `HandleReports` implementation. /// /// When configured properly, should be instantiated with /// `T::KeyOwnerIdentification, Offences, ReportLongevity` parameters. 
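Stepping back to the coretime hunk above: the only wire-format commitment `SwapLeases` makes is its codec index, 99, followed by the two para ids. A minimal model of that encoding with a demo enum (the real call is additionally wrapped in `UnpaidExecution` plus a transact-style instruction and sent to the broker parachain, as `on_legacy_lease_swap` shows):

    use parity_scale_codec::Encode; // requires the codec crate with the derive feature

    #[derive(Encode)]
    enum DemoCoretimeCalls {
        #[codec(index = 99)]
        SwapLeases(u32, u32), // ParaId is a u32 newtype in primitives
    }

    fn main() {
        let bytes = DemoCoretimeCalls::SwapLeases(1000, 2000).encode();
        assert_eq!(bytes[0], 99); // the explicit variant index survives encoding
        assert_eq!(bytes.len(), 1 + 4 + 4); // index byte + two little-endian u32s
    }

Pinning the index explicitly is what lets the relay chain append the variant without breaking the broker chain's decoder, which only needs to agree on index 99.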
diff --git a/polkadot/runtime/parachains/src/hrmp.rs b/polkadot/runtime/parachains/src/hrmp.rs index 6d6991df87f2..a66a4d1b7577 100644 --- a/polkadot/runtime/parachains/src/hrmp.rs +++ b/polkadot/runtime/parachains/src/hrmp.rs @@ -226,12 +226,12 @@ impl fmt::Debug for OutboundHrmpAcceptanceErr { ), TotalSizeExceeded { idx, total_size, limit } => write!( fmt, - "sending the HRMP message at index {} would exceed the neogitiated channel total size ({} > {})", + "sending the HRMP message at index {} would exceed the negotiated channel total size ({} > {})", idx, total_size, limit, ), CapacityExceeded { idx, count, limit } => write!( fmt, - "sending the HRMP message at index {} would exceed the neogitiated channel capacity ({} > {})", + "sending the HRMP message at index {} would exceed the negotiated channel capacity ({} > {})", idx, count, limit, ), } @@ -787,7 +787,7 @@ pub mod pallet { .ok_or(ArithmeticError::Underflow)?; T::Currency::unreserve( &channel_id.sender.into_account_truncating(), - // The difference should always be convertable into `Balance`, but be + // The difference should always be convertible into `Balance`, but be // paranoid and do nothing in case. amount.try_into().unwrap_or(Zero::zero()), ); diff --git a/polkadot/runtime/parachains/src/hrmp/tests.rs b/polkadot/runtime/parachains/src/hrmp/tests.rs index 7e7b67c8059d..162c14121601 100644 --- a/polkadot/runtime/parachains/src/hrmp/tests.rs +++ b/polkadot/runtime/parachains/src/hrmp/tests.rs @@ -702,7 +702,7 @@ fn verify_externally_accessible() { sp_io::storage::get(&well_known_keys::hrmp_ingress_channel_index(para_b)) .expect("the ingress index must be present for para_b"); let ingress_index = >::decode(&mut &raw_ingress_index[..]) - .expect("ingress indexx should be decodable as a list of para ids"); + .expect("ingress index should be decodable as a list of para ids"); assert_eq!(ingress_index, vec![para_a]); // Now, verify that we can access and decode the egress index. diff --git a/polkadot/runtime/parachains/src/inclusion/migration.rs b/polkadot/runtime/parachains/src/inclusion/migration.rs new file mode 100644 index 000000000000..5f35680ee694 --- /dev/null +++ b/polkadot/runtime/parachains/src/inclusion/migration.rs @@ -0,0 +1,317 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
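Before the body of the new migration module below, a pattern note tying together the `OnRuntimeUpgrade` to `UncheckedOnRuntimeUpgrade` hunks above: the inner migration no longer checks storage versions itself and instead relies on `VersionedMigration` for the gate. A generic sketch of the shape (version numbers, names, and the empty transform are placeholders):

    use frame_support::{
        migrations::VersionedMigration, traits::UncheckedOnRuntimeUpgrade, weights::Weight,
    };

    pub struct InnerMigrateV0ToV1<T>(core::marker::PhantomData<T>);

    impl<T: crate::inclusion::Config> UncheckedOnRuntimeUpgrade for InnerMigrateV0ToV1<T> {
        fn on_runtime_upgrade() -> Weight {
            // Transform storage here. No version checks needed: the wrapper
            // below only runs this when the on-chain version is exactly 0.
            Weight::zero()
        }
    }

    /// The exported migration: bumps the storage version from 0 to 1 around
    /// the unchecked inner migration.
    pub type MigrateV0ToV1<T> = VersionedMigration<
        0,
        1,
        InnerMigrateV0ToV1<T>,
        crate::inclusion::Pallet<T>,
        <T as frame_system::Config>::DbWeight,
    >;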
+
+pub use v1::MigrateToV1;
+
+pub mod v0 {
+	use crate::inclusion::{Config, Pallet};
+	use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec};
+	use frame_support::{storage_alias, Twox64Concat};
+	use frame_system::pallet_prelude::BlockNumberFor;
+	use parity_scale_codec::{Decode, Encode};
+	use primitives::{
+		AvailabilityBitfield, CandidateCommitments, CandidateDescriptor, CandidateHash, CoreIndex,
+		GroupIndex, Id as ParaId, ValidatorIndex,
+	};
+	use scale_info::TypeInfo;
+
+	#[derive(Encode, Decode, PartialEq, TypeInfo, Clone, Debug)]
+	pub struct CandidatePendingAvailability<H, N> {
+		pub core: CoreIndex,
+		pub hash: CandidateHash,
+		pub descriptor: CandidateDescriptor<H>,
+		pub availability_votes: BitVec<u8, BitOrderLsb0>,
+		pub backers: BitVec<u8, BitOrderLsb0>,
+		pub relay_parent_number: N,
+		pub backed_in_number: N,
+		pub backing_group: GroupIndex,
+	}
+
+	#[derive(Encode, Decode, TypeInfo, Debug, PartialEq)]
+	pub struct AvailabilityBitfieldRecord<N> {
+		pub bitfield: AvailabilityBitfield,
+		pub submitted_at: N,
+	}
+
+	#[storage_alias]
+	pub type PendingAvailability<T: Config> = StorageMap<
+		Pallet<T>,
+		Twox64Concat,
+		ParaId,
+		CandidatePendingAvailability<<T as frame_system::Config>::Hash, BlockNumberFor<T>>,
+	>;
+
+	#[storage_alias]
+	pub type PendingAvailabilityCommitments<T: Config> =
+		StorageMap<Pallet<T>, Twox64Concat, ParaId, CandidateCommitments>;
+
+	#[storage_alias]
+	pub type AvailabilityBitfields<T: Config> = StorageMap<
+		Pallet<T>,
+		Twox64Concat,
+		ValidatorIndex,
+		AvailabilityBitfieldRecord<BlockNumberFor<T>>,
+	>;
+}
+
+mod v1 {
+	use super::v0::{
+		AvailabilityBitfields, PendingAvailability as V0PendingAvailability,
+		PendingAvailabilityCommitments as V0PendingAvailabilityCommitments,
+	};
+	use crate::inclusion::{
+		CandidatePendingAvailability as V1CandidatePendingAvailability, Config, Pallet,
+		PendingAvailability as V1PendingAvailability,
+	};
+	use frame_support::{traits::UncheckedOnRuntimeUpgrade, weights::Weight};
+	use sp_core::Get;
+	use sp_std::{collections::vec_deque::VecDeque, vec::Vec};
+
+	#[cfg(feature = "try-runtime")]
+	use frame_support::{
+		ensure,
+		traits::{GetStorageVersion, StorageVersion},
+	};
+	#[cfg(feature = "try-runtime")]
+	use parity_scale_codec::{Decode, Encode};
+
+	pub struct VersionUncheckedMigrateToV1<T>(sp_std::marker::PhantomData<T>);
+
+	impl<T: Config> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1<T> {
+		#[cfg(feature = "try-runtime")]
+		fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> {
+			log::trace!(target: crate::inclusion::LOG_TARGET, "Running pre_upgrade() for inclusion MigrateToV1");
+			let candidates_before_upgrade = V0PendingAvailability::<T>::iter().count();
+			let commitments_before_upgrade = V0PendingAvailabilityCommitments::<T>::iter().count();
+
+			if candidates_before_upgrade != commitments_before_upgrade {
+				log::warn!(
+					target: crate::inclusion::LOG_TARGET,
+					"Number of pending candidates differs from the number of pending commitments. {} vs {}",
+					candidates_before_upgrade,
+					commitments_before_upgrade
+				);
+			}
+
+			Ok((candidates_before_upgrade as u32).encode())
+		}
+
+		fn on_runtime_upgrade() -> Weight {
+			let mut weight: Weight = Weight::zero();
+
+			let v0_candidates: Vec<_> = V0PendingAvailability::<T>::drain().collect();
+
+			for (para_id, candidate) in v0_candidates {
+				let commitments = V0PendingAvailabilityCommitments::<T>::take(para_id);
+				// One write for each removal (one candidate and one commitment).
+				weight = weight.saturating_add(T::DbWeight::get().writes(2));
+
+				if let Some(commitments) = commitments {
+					let mut per_para = VecDeque::new();
+					per_para.push_back(V1CandidatePendingAvailability {
+						core: candidate.core,
+						hash: candidate.hash,
+						descriptor: candidate.descriptor,
+						availability_votes: candidate.availability_votes,
+						backers: candidate.backers,
+						relay_parent_number: candidate.relay_parent_number,
+						backed_in_number: candidate.backed_in_number,
+						backing_group: candidate.backing_group,
+						commitments,
+					});
+					V1PendingAvailability::<T>::insert(para_id, per_para);
+
+					weight = weight.saturating_add(T::DbWeight::get().writes(1));
+				}
+			}
+
+			// should've already been drained by the above for loop, but as a sanity check, in case
+			// there are more commitments than candidates.
+			// V0PendingAvailabilityCommitments should not contain too many keys so removing
+			// everything at once should be safe.
+			let res = V0PendingAvailabilityCommitments::<T>::clear(u32::MAX, None);
+			weight = weight.saturating_add(
+				T::DbWeight::get().reads_writes(res.loops as u64, res.backend as u64),
+			);
+
+			// AvailabilityBitfields should not contain too many keys so removing everything at once
+			// should be safe.
+			let res = AvailabilityBitfields::<T>::clear(u32::MAX, None);
+			weight = weight.saturating_add(
+				T::DbWeight::get().reads_writes(res.loops as u64, res.backend as u64),
+			);
+
+			weight
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn post_upgrade(state: Vec<u8>) -> Result<(), sp_runtime::TryRuntimeError> {
+			log::trace!(target: crate::inclusion::LOG_TARGET, "Running post_upgrade() for inclusion MigrateToV1");
+			ensure!(
+				Pallet::<T>::on_chain_storage_version() >= StorageVersion::new(1),
+				"Storage version should be >= 1 after the migration"
+			);
+
+			let candidates_before_upgrade =
+				u32::decode(&mut &state[..]).expect("Was properly encoded") as usize;
+			let candidates_after_upgrade = V1PendingAvailability::<T>::iter().fold(
+				0usize,
+				|mut acc, (_paraid, para_candidates)| {
+					acc += para_candidates.len();
+					acc
+				},
+			);
+
+			ensure!(
+				candidates_before_upgrade == candidates_after_upgrade,
+				"Number of pending candidates should be the same as the one before the upgrade."
+			);
+			ensure!(
+				V0PendingAvailability::<T>::iter().next() == None,
+				"Pending availability candidates storage v0 should have been removed"
+			);
+			ensure!(
+				V0PendingAvailabilityCommitments::<T>::iter().next() == None,
+				"Pending availability commitments storage should have been removed"
+			);
+			ensure!(
+				AvailabilityBitfields::<T>::iter().next() == None,
+				"Availability bitfields storage should have been removed"
+			);
+
+			Ok(())
+		}
+	}
+
+	/// Migrate to v1 inclusion module storage.
+	/// - merges the `PendingAvailabilityCommitments` into the `CandidatePendingAvailability`
+	///   storage
+	/// - removes the `AvailabilityBitfields` storage, which was never read.
+	pub type MigrateToV1<T> = frame_support::migrations::VersionedMigration<
+		0,
+		1,
+		VersionUncheckedMigrateToV1<T>,
+		Pallet<T>,
+		<T as frame_system::Config>::DbWeight,
+	>;
+}
+
+#[cfg(test)]
+mod tests {
+	use super::{v1::VersionUncheckedMigrateToV1, *};
+	use crate::{
+		inclusion::{
+			CandidatePendingAvailability as V1CandidatePendingAvailability,
+			PendingAvailability as V1PendingAvailability, *,
+		},
+		mock::{new_test_ext, MockGenesisConfig, Test},
+	};
+	use frame_support::traits::UncheckedOnRuntimeUpgrade;
+	use primitives::{AvailabilityBitfield, Id as ParaId};
+	use test_helpers::{dummy_candidate_commitments, dummy_candidate_descriptor, dummy_hash};
+
+	#[test]
+	fn migrate_to_v1() {
+		new_test_ext(MockGenesisConfig::default()).execute_with(|| {
+			// No data to migrate.
+			assert_eq!(
+				<VersionUncheckedMigrateToV1<Test> as UncheckedOnRuntimeUpgrade>::on_runtime_upgrade(),
+				Weight::zero()
+			);
+			assert!(V1PendingAvailability::<Test>::iter().next().is_none());
+
+			let mut expected = vec![];
+
+			for i in 1..5 {
+				let descriptor = dummy_candidate_descriptor(dummy_hash());
+				v0::PendingAvailability::<Test>::insert(
+					ParaId::from(i),
+					v0::CandidatePendingAvailability {
+						core: CoreIndex(i),
+						descriptor: descriptor.clone(),
+						relay_parent_number: i,
+						hash: CandidateHash(dummy_hash()),
+						availability_votes: Default::default(),
+						backed_in_number: i,
+						backers: Default::default(),
+						backing_group: GroupIndex(i),
+					},
+				);
+				v0::PendingAvailabilityCommitments::<Test>::insert(
+					ParaId::from(i),
+					dummy_candidate_commitments(HeadData(vec![i as _])),
+				);
+
+				v0::AvailabilityBitfields::<Test>::insert(
+					ValidatorIndex(i),
+					v0::AvailabilityBitfieldRecord {
+						bitfield: AvailabilityBitfield(Default::default()),
+						submitted_at: i,
+					},
+				);
+
+				expected.push((
+					ParaId::from(i),
+					[V1CandidatePendingAvailability {
+						core: CoreIndex(i),
+						descriptor,
+						relay_parent_number: i,
+						hash: CandidateHash(dummy_hash()),
+						availability_votes: Default::default(),
+						backed_in_number: i,
+						backers: Default::default(),
+						backing_group: GroupIndex(i),
+						commitments: dummy_candidate_commitments(HeadData(vec![i as _])),
+					}]
+					.into_iter()
+					.collect::<VecDeque<_>>(),
+				));
+			}
+			// add some wrong data also, candidates without commitments or commitments without
+			// candidates.
+			v0::PendingAvailability::<Test>::insert(
+				ParaId::from(6),
+				v0::CandidatePendingAvailability {
+					core: CoreIndex(6),
+					descriptor: dummy_candidate_descriptor(dummy_hash()),
+					relay_parent_number: 6,
+					hash: CandidateHash(dummy_hash()),
+					availability_votes: Default::default(),
+					backed_in_number: 6,
+					backers: Default::default(),
+					backing_group: GroupIndex(6),
+				},
+			);
+			v0::PendingAvailabilityCommitments::<Test>::insert(
+				ParaId::from(7),
+				dummy_candidate_commitments(HeadData(vec![7 as _])),
+			);
+
+			// For tests, db weight is zero.
+ assert_eq!( + as UncheckedOnRuntimeUpgrade>::on_runtime_upgrade(), + Weight::zero() + ); + + assert_eq!(v0::PendingAvailabilityCommitments::::iter().next(), None); + assert_eq!(v0::PendingAvailability::::iter().next(), None); + assert_eq!(v0::AvailabilityBitfields::::iter().next(), None); + + let mut actual = V1PendingAvailability::::iter().collect::>(); + actual.sort_by(|(id1, _), (id2, _)| id1.cmp(id2)); + expected.sort_by(|(id1, _), (id2, _)| id1.cmp(id2)); + + assert_eq!(actual, expected); + }); + } +} diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 76dc8a58b67d..0c11fbc29d84 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -22,29 +22,30 @@ use crate::{ configuration::{self, HostConfiguration}, disputes, dmp, hrmp, - paras::{self, SetGoAhead}, - scheduler::{self, AvailabilityTimeoutStatus}, + paras::{self, UpgradeStrategy}, + scheduler, shared::{self, AllowedRelayParentsTracker}, + util::make_persisted_validation_data_with_parent, }; -use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; +use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; #[cfg(feature = "std")] use core::fmt; use frame_support::{ defensive, pallet_prelude::*, - traits::{Defensive, EnqueueMessage, Footprint, QueueFootprint}, + traits::{EnqueueMessage, Footprint, QueueFootprint}, BoundedSlice, }; use frame_system::pallet_prelude::*; use pallet_message_queue::OnQueueChanged; use parity_scale_codec::{Decode, Encode}; use primitives::{ - effective_minimum_backing_votes, supermajority_threshold, well_known_keys, - AvailabilityBitfield, BackedCandidate, CandidateCommitments, CandidateDescriptor, - CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, - HeadData, Id as ParaId, SignedAvailabilityBitfields, SigningContext, UpwardMessage, - ValidatorId, ValidatorIndex, ValidityAttestation, + effective_minimum_backing_votes, supermajority_threshold, well_known_keys, BackedCandidate, + CandidateCommitments, CandidateDescriptor, CandidateHash, CandidateReceipt, + CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, HeadData, Id as ParaId, + SignedAvailabilityBitfields, SigningContext, UpwardMessage, ValidatorId, ValidatorIndex, + ValidityAttestation, }; use scale_info::TypeInfo; use sp_runtime::{traits::One, DispatchError, SaturatedConversion, Saturating}; @@ -57,6 +58,8 @@ pub(crate) mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +pub mod migration; + pub trait WeightInfo { fn receive_upward_messages(i: u32) -> Weight; } @@ -80,20 +83,8 @@ impl WeightInfo for () { /// `configuration` pallet to check these values before setting. pub const MAX_UPWARD_MESSAGE_SIZE_BOUND: u32 = 128 * 1024; -/// A bitfield signed by a validator indicating that it is keeping its piece of the erasure-coding -/// for any backed candidates referred to by a `1` bit available. -/// -/// The bitfield's signature should be checked at the point of submission. Afterwards it can be -/// dropped. -#[derive(Encode, Decode, TypeInfo)] -#[cfg_attr(test, derive(Debug))] -pub struct AvailabilityBitfieldRecord { - bitfield: AvailabilityBitfield, // one bit per core. - submitted_at: N, // for accounting, as meaning of bits may change over time. -} - /// A backed candidate pending availability. 
-#[derive(Encode, Decode, PartialEq, TypeInfo)] +#[derive(Encode, Decode, PartialEq, TypeInfo, Clone)] #[cfg_attr(test, derive(Debug))] pub struct CandidatePendingAvailability { /// The availability core this is assigned to. @@ -102,6 +93,8 @@ pub struct CandidatePendingAvailability { hash: CandidateHash, /// The candidate descriptor. descriptor: CandidateDescriptor, + /// The candidate commitments. + commitments: CandidateCommitments, /// The received availability votes. One bit per validator. availability_votes: BitVec, /// The backers of the candidate pending availability. @@ -121,8 +114,11 @@ impl CandidatePendingAvailability { } /// Get the relay-chain block number this was backed in. - pub(crate) fn backed_in_number(&self) -> &N { - &self.backed_in_number + pub(crate) fn backed_in_number(&self) -> N + where + N: Clone, + { + self.backed_in_number.clone() } /// Get the core index. @@ -140,6 +136,11 @@ impl CandidatePendingAvailability { &self.descriptor } + /// Get the candidate commitments. + pub(crate) fn candidate_commitments(&self) -> &CandidateCommitments { + &self.commitments + } + /// Get the candidate's relay parent's number. pub(crate) fn relay_parent_number(&self) -> N where @@ -148,11 +149,22 @@ impl CandidatePendingAvailability { self.relay_parent_number.clone() } + /// Get the candidate backing group. + pub(crate) fn backing_group(&self) -> GroupIndex { + self.backing_group + } + + /// Get the candidate's backers. + pub(crate) fn backers(&self) -> &BitVec { + &self.backers + } + #[cfg(any(feature = "runtime-benchmarks", test))] pub(crate) fn new( core: CoreIndex, hash: CandidateHash, descriptor: CandidateDescriptor, + commitments: CandidateCommitments, availability_votes: BitVec, backers: BitVec, relay_parent_number: N, @@ -163,6 +175,7 @@ impl CandidatePendingAvailability { core, hash, descriptor, + commitments, availability_votes, backers, relay_parent_number, @@ -253,8 +266,10 @@ pub type MaxUmpMessageLenOf = pub mod pallet { use super::*; + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] @@ -297,30 +312,10 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// Validator indices are out of order or contains duplicates. - UnsortedOrDuplicateValidatorIndices, - /// Dispute statement sets are out of order or contain duplicates. - UnsortedOrDuplicateDisputeStatementSet, - /// Backed candidates are out of order (core index) or contain duplicates. - UnsortedOrDuplicateBackedCandidates, - /// A different relay parent was provided compared to the on-chain stored one. - UnexpectedRelayParent, - /// Availability bitfield has unexpected size. - WrongBitfieldSize, - /// Bitfield consists of zeros only. - BitfieldAllZeros, - /// Multiple bitfields submitted by same validator or validators out of order by index. - BitfieldDuplicateOrUnordered, /// Validator index out of bounds. ValidatorIndexOutOfBounds, - /// Invalid signature - InvalidBitfieldSignature, /// Candidate submitted but para not scheduled. UnscheduledCandidate, - /// Candidate scheduled despite pending candidate already existing for the para. - CandidateScheduledBeforeParaFree, - /// Scheduled cores out of order. - ScheduledOutOfOrder, /// Head data exceeds the configured maximum. HeadDataTooLarge, /// Code upgrade prematurely. 
@@ -356,31 +351,22 @@ pub mod pallet { /// The `para_head` hash in the candidate descriptor doesn't match the hash of the actual /// para head in the commitments. ParaHeadMismatch, - /// A bitfield that references a freed core, - /// either intentionally or as part of a concluded - /// invalid dispute. - BitfieldReferencesFreedCore, } - /// The latest bitfield for each validator, referred to by their index in the validator set. - #[pallet::storage] - pub(crate) type AvailabilityBitfields = - StorageMap<_, Twox64Concat, ValidatorIndex, AvailabilityBitfieldRecord>>; - - /// Candidates pending availability by `ParaId`. + /// Candidates pending availability by `ParaId`. They form a chain starting from the latest + /// included head of the para. + /// Use a different prefix post-migration to v1, since the v0 `PendingAvailability` storage + /// would otherwise have the exact same prefix which could cause undefined behaviour when doing + /// the migration. #[pallet::storage] + #[pallet::storage_prefix = "V1"] pub(crate) type PendingAvailability = StorageMap< _, Twox64Concat, ParaId, - CandidatePendingAvailability>, + VecDeque>>, >; - /// The commitments of candidates pending availability, by `ParaId`. - #[pallet::storage] - pub(crate) type PendingAvailabilityCommitments = - StorageMap<_, Twox64Concat, ParaId, CandidateCommitments>; - #[pallet::call] impl Pallet {} } @@ -469,9 +455,7 @@ impl Pallet { ) { // unlike most drain methods, drained elements are not cleared on `Drop` of the iterator // and require consumption. - for _ in >::drain() {} for _ in >::drain() {} - for _ in >::drain() {} Self::cleanup_outgoing_ump_dispatch_queues(outgoing_paras); } @@ -490,27 +474,18 @@ impl Pallet { /// /// Bitfields are expected to have been sanitized already. E.g. via `sanitize_bitfields`! /// - /// Updates storage items `PendingAvailability` and `AvailabilityBitfields`. + /// Updates storage items `PendingAvailability`. /// /// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became /// available, and cores free. - pub(crate) fn update_pending_availability_and_get_freed_cores( - expected_bits: usize, + pub(crate) fn update_pending_availability_and_get_freed_cores( validators: &[ValidatorId], signed_bitfields: SignedAvailabilityBitfields, - core_lookup: F, - ) -> Vec<(CoreIndex, CandidateHash)> - where - F: Fn(CoreIndex) -> Option, - { - let mut assigned_paras_record = (0..expected_bits) - .map(|bit_index| core_lookup(CoreIndex::from(bit_index as u32))) - .map(|opt_para_id| { - opt_para_id.map(|para_id| (para_id, PendingAvailability::::get(¶_id))) - }) - .collect::>(); + ) -> Vec<(CoreIndex, CandidateHash)> { + let threshold = availability_threshold(validators.len()); + + let mut votes_per_core: BTreeMap> = BTreeMap::new(); - let now = >::block_number(); for (checked_bitfield, validator_index) in signed_bitfields.into_iter().map(|signed_bitfield| { let validator_idx = signed_bitfield.validator_index(); @@ -518,310 +493,275 @@ impl Pallet { (checked_bitfield, validator_idx) }) { for (bit_idx, _) in checked_bitfield.0.iter().enumerate().filter(|(_, is_av)| **is_av) { - let pending_availability = if let Some((_, pending_availability)) = - assigned_paras_record[bit_idx].as_mut() - { - pending_availability - } else { - // For honest validators, this happens in case of unoccupied cores, - // which in turn happens in case of a disputed candidate. 
- // A malicious one might include arbitrary indices, but they are represented - // by `None` values and will be sorted out in the next if case. - continue - }; - - // defensive check - this is constructed by loading the availability bitfield - // record, which is always `Some` if the core is occupied - that's why we're here. - let validator_index = validator_index.0 as usize; - if let Some(mut bit) = - pending_availability.as_mut().and_then(|candidate_pending_availability| { - candidate_pending_availability.availability_votes.get_mut(validator_index) - }) { - *bit = true; - } + let core_index = CoreIndex(bit_idx as u32); + votes_per_core + .entry(core_index) + .or_insert_with(|| BTreeSet::new()) + .insert(validator_index); } - - let record = - AvailabilityBitfieldRecord { bitfield: checked_bitfield, submitted_at: now }; - - >::insert(&validator_index, record); } - let threshold = availability_threshold(validators.len()); + let mut freed_cores = vec![]; + + let pending_paraids: Vec<_> = >::iter_keys().collect(); + for paraid in pending_paraids { + >::mutate(paraid, |candidates| { + if let Some(candidates) = candidates { + let mut last_enacted_index: Option = None; + + for (candidate_index, candidate) in candidates.iter_mut().enumerate() { + if let Some(validator_indices) = votes_per_core.remove(&candidate.core) { + for validator_index in validator_indices.iter() { + // defensive check - this is constructed by loading the + // availability bitfield record, which is always `Some` if + // the core is occupied - that's why we're here. + if let Some(mut bit) = + candidate.availability_votes.get_mut(validator_index.0 as usize) + { + *bit = true; + } + } + } + + // We check for the candidate's availability even if we didn't get any new + // bitfields for its core, as it may have already been available at a + // previous block but wasn't enacted due to its predecessors not being + // available. + if candidate.availability_votes.count_ones() >= threshold { + // We can only enact a candidate if we've enacted all of its + // predecessors already. 
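The ordering rule referenced in the comment above is the heart of the new bitfield processing: a candidate may only be enacted when the whole prefix of its para's queue before it has been enacted in the same pass. A standalone sketch of that rule with plain `std` collections (the names and the vote representation are illustrative, not the pallet's types):

```rust
use std::collections::{BTreeMap, BTreeSet, VecDeque};

struct Pending { core: u32, votes: BTreeSet<u32> } // validator indices that voted "available"

/// Enact the longest fully-available prefix of a para's candidate queue.
/// Returns the cores of the enacted (freed) candidates. `threshold` is the
/// availability supermajority.
fn enact_available_prefix(
    queue: &mut VecDeque<Pending>,
    new_votes: &mut BTreeMap<u32, BTreeSet<u32>>, // core -> fresh votes from bitfields
    threshold: usize,
) -> Vec<u32> {
    let mut last_enacted: Option<usize> = None;
    for (idx, candidate) in queue.iter_mut().enumerate() {
        // Merge in any fresh votes for the core this candidate occupies.
        if let Some(votes) = new_votes.remove(&candidate.core) {
            candidate.votes.extend(votes);
        }
        // A candidate can only be enacted if all its predecessors were.
        let predecessor_ok = match (idx, last_enacted) {
            (0, None) => true,
            (i, Some(prev)) => prev + 1 == i,
            _ => false,
        };
        if candidate.votes.len() >= threshold && predecessor_ok {
            last_enacted = Some(idx);
        }
    }
    match last_enacted {
        Some(last) => queue.drain(0..=last).map(|c| c.core).collect(),
        None => vec![],
    }
}

fn main() {
    let mut q = VecDeque::from([
        Pending { core: 0, votes: BTreeSet::new() },
        Pending { core: 1, votes: BTreeSet::new() },
    ]);
    let mut votes = BTreeMap::from([(0, BTreeSet::from([1, 2, 3]))]);
    // With threshold 3, only the first candidate becomes available and is enacted.
    assert_eq!(enact_available_prefix(&mut q, &mut votes, 3), vec![0]);
    assert_eq!(q.len(), 1);
}
```

As in the runtime code that follows, a candidate that reaches the threshold while a predecessor is still unavailable simply stays queued and can be enacted in a later block.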
+ let can_enact = if candidate_index == 0 { + last_enacted_index == None + } else { + let prev_candidate_index = usize::try_from(candidate_index - 1) + .expect("Previous `if` would have caught a 0 candidate index."); + matches!(last_enacted_index, Some(old_index) if old_index == prev_candidate_index) + }; + + if can_enact { + last_enacted_index = Some(candidate_index); + } + } + } - let mut freed_cores = Vec::with_capacity(expected_bits); - for (para_id, pending_availability) in assigned_paras_record - .into_iter() - .flatten() - .filter_map(|(id, p)| p.map(|p| (id, p))) - { - if pending_availability.availability_votes.count_ones() >= threshold { - >::remove(¶_id); - let commitments = match PendingAvailabilityCommitments::::take(¶_id) { - Some(commitments) => commitments, - None => { - log::warn!( - target: LOG_TARGET, - "Inclusion::process_bitfields: PendingAvailability and PendingAvailabilityCommitments - are out of sync, did someone mess with the storage?", - ); - continue - }, - }; - - let receipt = CommittedCandidateReceipt { - descriptor: pending_availability.descriptor, - commitments, - }; - let _weight = Self::enact_candidate( - pending_availability.relay_parent_number, - receipt, - pending_availability.backers, - pending_availability.availability_votes, - pending_availability.core, - pending_availability.backing_group, - ); - - freed_cores.push((pending_availability.core, pending_availability.hash)); - } else { - >::insert(¶_id, &pending_availability); - } + // Trim the pending availability candidates storage and enact candidates of this + // para now. + if let Some(last_enacted_index) = last_enacted_index { + let evicted_candidates = candidates.drain(0..=last_enacted_index); + for candidate in evicted_candidates { + freed_cores.push((candidate.core, candidate.hash)); + + let receipt = CommittedCandidateReceipt { + descriptor: candidate.descriptor, + commitments: candidate.commitments, + }; + let _weight = Self::enact_candidate( + candidate.relay_parent_number, + receipt, + candidate.backers, + candidate.availability_votes, + candidate.core, + candidate.backing_group, + ); + } + } + } + }); } freed_cores } - /// Process candidates that have been backed. Provide the relay storage root, a set of - /// candidates and scheduled cores. + /// Process candidates that have been backed. Provide a set of + /// candidates along with their scheduled cores. /// - /// Both should be sorted ascending by core index, and the candidates should be a subset of - /// scheduled cores. If these conditions are not met, the execution of the function fails. + /// Candidates of the same paraid should be sorted according to their dependency order (they + /// should form a chain). If this condition is not met, this function will return an error. + /// (This really should not happen here, if the candidates were properly sanitised in + /// paras_inherent). pub(crate) fn process_candidates( allowed_relay_parents: &AllowedRelayParentsTracker>, - candidates: Vec<(BackedCandidate, CoreIndex)>, + candidates: &BTreeMap, CoreIndex)>>, group_validators: GV, core_index_enabled: bool, ) -> Result, DispatchError> where GV: Fn(GroupIndex) -> Option>, { - let now = >::block_number(); - if candidates.is_empty() { return Ok(ProcessedCandidates::default()) } - let minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes; + let now = >::block_number(); let validators = shared::Pallet::::active_validator_keys(); // Collect candidate receipts with backers. 
let mut candidate_receipt_with_backing_validator_indices = Vec::with_capacity(candidates.len()); + let mut core_indices = Vec::with_capacity(candidates.len()); - // Do all checks before writing storage. - let core_indices_and_backers = { - let mut core_indices_and_backers = Vec::with_capacity(candidates.len()); - let mut last_core = None; - - let mut check_assignment_in_order = |core_idx| -> DispatchResult { - ensure!( - last_core.map_or(true, |core| core_idx > core), - Error::::ScheduledOutOfOrder, - ); - - last_core = Some(core_idx); - Ok(()) + for (para_id, para_candidates) in candidates { + let mut latest_head_data = match Self::para_latest_head_data(para_id) { + None => { + defensive!("Latest included head data for paraid {:?} is None", para_id); + continue + }, + Some(latest_head_data) => latest_head_data, }; - // We combine an outer loop over candidates with an inner loop over the scheduled, - // where each iteration of the outer loop picks up at the position - // in scheduled just after the past iteration left off. - // - // If the candidates appear in the same order as they appear in `scheduled`, - // then they should always be found. If the end of `scheduled` is reached, - // then the candidate was either not scheduled or out-of-order. - // - // In the meantime, we do certain sanity checks on the candidates and on the scheduled - // list. - for (candidate_idx, (backed_candidate, core_index)) in candidates.iter().enumerate() { - let relay_parent_hash = backed_candidate.descriptor().relay_parent; - let para_id = backed_candidate.descriptor().para_id; - - let prev_context = >::para_most_recent_context(para_id); - - let check_ctx = CandidateCheckContext::::new(prev_context); - let signing_context = SigningContext { - parent_hash: relay_parent_hash, - session_index: shared::Pallet::::session_index(), - }; - - let relay_parent_number = match check_ctx.verify_backed_candidate( + for (candidate, core) in para_candidates.iter() { + let candidate_hash = candidate.candidate().hash(); + + let check_ctx = CandidateCheckContext::::new(None); + let relay_parent_number = check_ctx.verify_backed_candidate( &allowed_relay_parents, - candidate_idx, - backed_candidate.candidate(), - )? { - Err(FailedToCreatePVD) => { - log::debug!( - target: LOG_TARGET, - "Failed to create PVD for candidate {}", - candidate_idx, - ); - // We don't want to error out here because it will - // brick the relay-chain. So we return early without - // doing anything. - return Ok(ProcessedCandidates::default()) - }, - Ok(rpn) => rpn, - }; - - let (validator_indices, _) = - backed_candidate.validator_indices_and_core_index(core_index_enabled); - - log::debug!( - target: LOG_TARGET, - "Candidate {:?} on {:?}, - core_index_enabled = {}", - backed_candidate.hash(), - core_index, - core_index_enabled - ); - - check_assignment_in_order(core_index)?; - - let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; - - ensure!( - >::get(¶_id).is_none() && - >::get(¶_id).is_none(), - Error::::CandidateScheduledBeforeParaFree, - ); - - // The candidate based upon relay parent `N` should be backed by a group - // assigned to core at block `N + 1`. Thus, `relay_parent_number + 1` - // will always land in the current session. + candidate.candidate(), + latest_head_data.clone(), + )?; + + // The candidate based upon relay parent `N` should be backed by a + // group assigned to core at block `N + 1`. Thus, + // `relay_parent_number + 1` will always land in the current + // session. 
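A key structural change in `process_candidates` is visible just above: `latest_head_data` threads the output head of each accepted candidate into the validation of the next one, so a para's candidates must form a chain. A simplified sketch of that invariant, comparing head data directly (the real code checks it indirectly, through the hash of the persisted validation data built from the parent head):

```rust
#[derive(Clone, Debug, PartialEq)]
struct HeadData(Vec<u8>);

struct Candidate { parent_head: HeadData, output_head: HeadData }

/// Walk a para's candidates in order, requiring each to build on the output
/// head of its predecessor (or on the latest included head for the first).
fn validate_chain(latest_included: HeadData, chain: &[Candidate]) -> Result<HeadData, String> {
    let mut latest = latest_included;
    for (i, c) in chain.iter().enumerate() {
        if c.parent_head != latest {
            return Err(format!("candidate {i} does not build on the previous head"));
        }
        latest = c.output_head.clone();
    }
    Ok(latest)
}

fn main() {
    let genesis = HeadData(vec![0]);
    let chain = [
        Candidate { parent_head: HeadData(vec![0]), output_head: HeadData(vec![1]) },
        Candidate { parent_head: HeadData(vec![1]), output_head: HeadData(vec![2]) },
    ];
    assert_eq!(validate_chain(genesis, &chain), Ok(HeadData(vec![2])));
}
```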
let group_idx = >::group_assigned_to_core( - *core_index, + *core, relay_parent_number + One::one(), ) .ok_or_else(|| { log::warn!( target: LOG_TARGET, - "Failed to compute group index for candidate {}", - candidate_idx + "Failed to compute group index for candidate {:?}", + candidate_hash ); Error::::InvalidAssignment })?; let group_vals = group_validators(group_idx).ok_or_else(|| Error::::InvalidGroupIndex)?; - // check the signatures in the backing and that it is a majority. - { - let maybe_amount_validated = primitives::check_candidate_backing( - backed_candidate.candidate().hash(), - backed_candidate.validity_votes(), - validator_indices, - &signing_context, - group_vals.len(), - |intra_group_vi| { - group_vals - .get(intra_group_vi) - .and_then(|vi| validators.get(vi.0 as usize)) - .map(|v| v.clone()) - }, - ); - - match maybe_amount_validated { - Ok(amount_validated) => ensure!( - amount_validated >= - effective_minimum_backing_votes( - group_vals.len(), - minimum_backing_votes - ), - Error::::InsufficientBacking, - ), - Err(()) => { - Err(Error::::InvalidBacking)?; - }, - } - - let mut backer_idx_and_attestation = - Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity( - validator_indices.count_ones(), - ); - let candidate_receipt = backed_candidate.receipt(); - - for ((bit_idx, _), attestation) in validator_indices - .iter() - .enumerate() - .filter(|(_, signed)| **signed) - .zip(backed_candidate.validity_votes().iter().cloned()) - { - let val_idx = - group_vals.get(bit_idx).expect("this query succeeded above; qed"); - backer_idx_and_attestation.push((*val_idx, attestation)); - - backers.set(val_idx.0 as _, true); + // Check backing vote count and validity. + let (backers, backer_idx_and_attestation) = Self::check_backing_votes( + candidate, + &validators, + group_vals, + core_index_enabled, + )?; + + // Found a valid candidate. + latest_head_data = candidate.candidate().commitments.head_data.clone(); + candidate_receipt_with_backing_validator_indices + .push((candidate.receipt(), backer_idx_and_attestation)); + core_indices.push((*core, *para_id)); + + // Update storage now + >::mutate(¶_id, |pending_availability| { + let new_candidate = CandidatePendingAvailability { + core: *core, + hash: candidate_hash, + descriptor: candidate.candidate().descriptor.clone(), + commitments: candidate.candidate().commitments.clone(), + // initialize all availability votes to 0. + availability_votes: bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()], + relay_parent_number, + backers: backers.to_bitvec(), + backed_in_number: now, + backing_group: group_idx, + }; + + if let Some(pending_availability) = pending_availability { + pending_availability.push_back(new_candidate); + } else { + *pending_availability = + Some([new_candidate].into_iter().collect::>()) } - candidate_receipt_with_backing_validator_indices - .push((candidate_receipt, backer_idx_and_attestation)); - } + }); - core_indices_and_backers.push(( - (*core_index, para_id), - backers, + // Deposit backed event. + Self::deposit_event(Event::::CandidateBacked( + candidate.candidate().to_plain(), + candidate.candidate().commitments.head_data.clone(), + *core, group_idx, - relay_parent_number, )); } + } - core_indices_and_backers - }; + Ok(ProcessedCandidates:: { + core_indices, + candidate_receipt_with_backing_validator_indices, + }) + } - // one more sweep for actually writing to storage. 
- let core_indices = core_indices_and_backers.iter().map(|(c, ..)| *c).collect(); - for ((candidate, _), (core, backers, group, relay_parent_number)) in - candidates.into_iter().zip(core_indices_and_backers) - { - let para_id = candidate.descriptor().para_id; + // Get the latest backed output head data of this para. + pub(crate) fn para_latest_head_data(para_id: &ParaId) -> Option { + match >::get(para_id).and_then(|pending_candidates| { + pending_candidates.back().map(|x| x.commitments.head_data.clone()) + }) { + Some(head_data) => Some(head_data), + None => >::para_head(para_id), + } + } - // initialize all availability votes to 0. - let availability_votes: BitVec = - bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; + fn check_backing_votes( + backed_candidate: &BackedCandidate, + validators: &[ValidatorId], + group_vals: Vec, + core_index_enabled: bool, + ) -> Result<(BitVec, Vec<(ValidatorIndex, ValidityAttestation)>), Error> { + let minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes; - Self::deposit_event(Event::::CandidateBacked( - candidate.candidate().to_plain(), - candidate.candidate().commitments.head_data.clone(), - core.0, - group, - )); + let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; + let signing_context = SigningContext { + parent_hash: backed_candidate.descriptor().relay_parent, + session_index: shared::Pallet::::session_index(), + }; - let candidate_hash = candidate.candidate().hash(); + let (validator_indices, _) = + backed_candidate.validator_indices_and_core_index(core_index_enabled); + + // check the signatures in the backing and that it is a majority. + let maybe_amount_validated = primitives::check_candidate_backing( + backed_candidate.candidate().hash(), + backed_candidate.validity_votes(), + validator_indices, + &signing_context, + group_vals.len(), + |intra_group_vi| { + group_vals + .get(intra_group_vi) + .and_then(|vi| validators.get(vi.0 as usize)) + .map(|v| v.clone()) + }, + ); - let (descriptor, commitments) = ( - candidate.candidate().descriptor.clone(), - candidate.candidate().commitments.clone(), - ); + match maybe_amount_validated { + Ok(amount_validated) => ensure!( + amount_validated >= + effective_minimum_backing_votes(group_vals.len(), minimum_backing_votes), + Error::::InsufficientBacking, + ), + Err(()) => { + Err(Error::::InvalidBacking)?; + }, + } - >::insert( - ¶_id, - CandidatePendingAvailability { - core: core.0, - hash: candidate_hash, - descriptor, - availability_votes, - relay_parent_number, - backers: backers.to_bitvec(), - backed_in_number: now, - backing_group: group, - }, + let mut backer_idx_and_attestation = + Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity( + validator_indices.count_ones(), ); - >::insert(¶_id, commitments); + + for ((bit_idx, _), attestation) in validator_indices + .iter() + .enumerate() + .filter(|(_, signed)| **signed) + .zip(backed_candidate.validity_votes().iter().cloned()) + { + let val_idx = group_vals.get(bit_idx).expect("this query succeeded above; qed"); + backer_idx_and_attestation.push((*val_idx, attestation)); + + backers.set(val_idx.0 as _, true); } - Ok(ProcessedCandidates:: { - core_indices, - candidate_receipt_with_backing_validator_indices, - }) + Ok((backers, backer_idx_and_attestation)) } /// Run the acceptance criteria checks on the given candidate commitments. 
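The backing check that `process_candidates` now delegates to `check_backing_votes` reduces to counting the signed validity votes against an effective minimum. The sketch below assumes the primitive clamps the configured minimum to the backing group size, which matches how `effective_minimum_backing_votes` is used in the hunk above:

```rust
/// Assumed behaviour of the effective-minimum primitive: the configured
/// minimum can never exceed the group size.
fn effective_minimum(group_len: usize, configured_minimum: usize) -> usize {
    configured_minimum.min(group_len)
}

/// Count the signed bits in a backing bitfield and compare against the
/// effective minimum, returning the vote count on success.
fn check_backing(
    signed_bits: &[bool],
    group_len: usize,
    configured_minimum: usize,
) -> Result<usize, &'static str> {
    let votes = signed_bits.iter().filter(|b| **b).count();
    if votes >= effective_minimum(group_len, configured_minimum) {
        Ok(votes)
    } else {
        Err("insufficient backing")
    }
}

fn main() {
    // A group of 5 with a configured minimum of 2 needs 2 votes.
    assert!(check_backing(&[true, false, true, false, false], 5, 2).is_ok());
    // A group of 1 can never be asked for more than 1 vote.
    assert!(check_backing(&[true], 1, 2).is_ok());
}
```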
@@ -896,7 +836,7 @@ impl Pallet { new_code, now, &config, - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, )); } @@ -1028,110 +968,155 @@ impl Pallet { weight } - /// Cleans up all paras pending availability that the predicate returns true for. - /// - /// The predicate accepts the index of the core and the block number the core has been occupied - /// since (i.e. the block number the candidate was backed at in this fork of the relay chain). + /// Cleans up all timed out candidates as well as their descendant candidates. /// /// Returns a vector of cleaned-up core IDs. - pub(crate) fn collect_pending( - pred: impl Fn(BlockNumberFor) -> AvailabilityTimeoutStatus>, - ) -> Vec { - let mut cleaned_up_ids = Vec::new(); - let mut cleaned_up_cores = Vec::new(); - - for (para_id, pending_record) in >::iter() { - if pred(pending_record.backed_in_number).timed_out { - cleaned_up_ids.push(para_id); - cleaned_up_cores.push(pending_record.core); - } - } - - for para_id in cleaned_up_ids { - let pending = >::take(¶_id); - let commitments = >::take(¶_id); - - if let (Some(pending), Some(commitments)) = (pending, commitments) { - // defensive: this should always be true. - let candidate = CandidateReceipt { - descriptor: pending.descriptor, - commitments_hash: commitments.hash(), - }; + pub(crate) fn free_timedout() -> Vec { + let timeout_pred = >::availability_timeout_predicate(); + + let timed_out: Vec<_> = Self::free_failed_cores( + |candidate| timeout_pred(candidate.backed_in_number).timed_out, + None, + ) + .collect(); + + let mut timed_out_cores = Vec::with_capacity(timed_out.len()); + for candidate in timed_out.iter() { + timed_out_cores.push(candidate.core); + + let receipt = CandidateReceipt { + descriptor: candidate.descriptor.clone(), + commitments_hash: candidate.commitments.hash(), + }; - Self::deposit_event(Event::::CandidateTimedOut( - candidate, - commitments.head_data, - pending.core, - )); - } + Self::deposit_event(Event::::CandidateTimedOut( + receipt, + candidate.commitments.head_data.clone(), + candidate.core, + )); } - cleaned_up_cores + timed_out_cores } - /// Cleans up all paras pending availability that are in the given list of disputed candidates. + /// Cleans up all cores pending availability occupied by one of the disputed candidates or which + /// are descendants of a disputed candidate. /// - /// Returns a vector of cleaned-up core IDs. - pub(crate) fn collect_disputed(disputed: &BTreeSet) -> Vec { - let mut cleaned_up_ids = Vec::new(); - let mut cleaned_up_cores = Vec::new(); - - for (para_id, pending_record) in >::iter() { - if disputed.contains(&pending_record.hash) { - cleaned_up_ids.push(para_id); - cleaned_up_cores.push(pending_record.core); + /// Returns a vector of cleaned-up core IDs, along with the evicted candidate hashes. + pub(crate) fn free_disputed( + disputed: &BTreeSet, + ) -> Vec<(CoreIndex, CandidateHash)> { + Self::free_failed_cores( + |candidate| disputed.contains(&candidate.hash), + Some(disputed.len()), + ) + .map(|candidate| (candidate.core, candidate.hash)) + .collect() + } + + // Clean up cores whose candidates are deemed as failed by the predicate. `pred` returns true if + // a candidate is considered failed. + // A failed candidate also frees all subsequent cores which hold descendants of said candidate. 
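The eviction rule described in the comments above, shared by the timeout and dispute paths, can be modelled in isolation: per para, locate the earliest failed candidate in the dependency-ordered queue and drop it together with everything after it. A minimal sketch with plain `std` collections, mirroring the routine that follows:

```rust
use std::collections::{BTreeMap, VecDeque};

/// Within each para's dependency-ordered queue, find the earliest candidate
/// deemed failed by `failed` and evict it along with all its descendants.
fn evict_failed<T>(
    queues: &mut BTreeMap<u32, VecDeque<T>>,
    failed: impl Fn(&T) -> bool,
) -> Vec<T> {
    let mut evicted = Vec::new();
    for queue in queues.values_mut() {
        if let Some(first_bad) = queue.iter().position(|c| failed(c)) {
            evicted.extend(queue.drain(first_bad..));
        }
    }
    evicted
}

fn main() {
    let mut queues = BTreeMap::from([(1, VecDeque::from([10, 99, 12]))]);
    // Treat 99 as the failed candidate: it and its descendant are evicted.
    let evicted = evict_failed(&mut queues, |c| *c == 99);
    assert_eq!(evicted, vec![99, 12]);
    assert_eq!(queues[&1], VecDeque::from([10]));
}
```

Returning the drained candidates lets the callers derive their own views: `free_timedout` reports the freed cores, while `free_disputed` reports `(core, hash)` pairs.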
+ fn free_failed_cores< + P: Fn(&CandidatePendingAvailability>) -> bool, + >( + pred: P, + capacity_hint: Option, + ) -> impl Iterator>> { + let mut earliest_dropped_indices: BTreeMap = BTreeMap::new(); + + for (para_id, pending_candidates) in >::iter() { + // We assume that pending candidates are stored in dependency order. So we need to store + // the earliest dropped candidate. All others that follow will get freed as well. + let mut earliest_dropped_idx = None; + for (index, candidate) in pending_candidates.iter().enumerate() { + if pred(candidate) { + earliest_dropped_idx = Some(index); + // Since we're looping the candidates in dependency order, we've found the + // earliest failed index for this paraid. + break; + } + } + + if let Some(earliest_dropped_idx) = earliest_dropped_idx { + earliest_dropped_indices.insert(para_id, earliest_dropped_idx); } } - for para_id in cleaned_up_ids { - let _ = >::take(¶_id); - let _ = >::take(¶_id); + let mut cleaned_up_cores = + if let Some(capacity) = capacity_hint { Vec::with_capacity(capacity) } else { vec![] }; + + for (para_id, earliest_dropped_idx) in earliest_dropped_indices { + // Do cleanups and record the cleaned up cores + >::mutate(¶_id, |record| { + if let Some(record) = record { + let cleaned_up = record.drain(earliest_dropped_idx..); + cleaned_up_cores.extend(cleaned_up); + } + }); } - cleaned_up_cores + cleaned_up_cores.into_iter() } - /// Forcibly enact the candidate with the given ID as though it had been deemed available - /// by bitfields. + /// Forcibly enact the pending candidates of the given paraid as though they had been deemed + /// available by bitfields. /// /// Is a no-op if there is no candidate pending availability for this para-id. - /// This should generally not be used but it is useful during execution of Runtime APIs, + /// If there are multiple candidates pending availability for this para-id, it will enact all of + /// them. This should generally not be used but it is useful during execution of Runtime APIs, /// where the changes to the state are expected to be discarded directly after. pub(crate) fn force_enact(para: ParaId) { - let pending = >::take(¶); - let commitments = >::take(¶); - - if let (Some(pending), Some(commitments)) = (pending, commitments) { - let candidate = - CommittedCandidateReceipt { descriptor: pending.descriptor, commitments }; - - Self::enact_candidate( - pending.relay_parent_number, - candidate, - pending.backers, - pending.availability_votes, - pending.core, - pending.backing_group, - ); - } + >::mutate(¶, |candidates| { + if let Some(candidates) = candidates { + for candidate in candidates.drain(..) { + let receipt = CommittedCandidateReceipt { + descriptor: candidate.descriptor, + commitments: candidate.commitments, + }; + + Self::enact_candidate( + candidate.relay_parent_number, + receipt, + candidate.backers, + candidate.availability_votes, + candidate.core, + candidate.backing_group, + ); + } + } + }); } - /// Returns the `CommittedCandidateReceipt` pending availability for the para provided, if any. + /// Returns the first `CommittedCandidateReceipt` pending availability for the para provided, if + /// any. 
pub(crate) fn candidate_pending_availability( para: ParaId, ) -> Option> { - >::get(¶) - .map(|p| p.descriptor) - .and_then(|d| >::get(¶).map(move |c| (d, c))) - .map(|(d, c)| CommittedCandidateReceipt { descriptor: d, commitments: c }) + >::get(¶).and_then(|p| { + p.get(0).map(|p| CommittedCandidateReceipt { + descriptor: p.descriptor.clone(), + commitments: p.commitments.clone(), + }) + }) } - /// Returns the metadata around the candidate pending availability for the + /// Returns the metadata around the first candidate pending availability for the /// para provided, if any. pub(crate) fn pending_availability( para: ParaId, + ) -> Option>> { + >::get(¶).and_then(|p| p.get(0).cloned()) + } + + /// Returns the metadata around the candidate pending availability occupying the supplied core, + /// if any. + pub(crate) fn pending_availability_with_core( + para: ParaId, + core: CoreIndex, ) -> Option>> { >::get(¶) + .and_then(|p| p.iter().find(|c| c.core == core).cloned()) } } @@ -1182,10 +1167,6 @@ pub(crate) struct CandidateCheckContext { prev_context: Option>, } -/// An error indicating that creating Persisted Validation Data failed -/// while checking a candidate's validity. -pub(crate) struct FailedToCreatePVD; - impl CandidateCheckContext { pub(crate) fn new(prev_context: Option>) -> Self { Self { config: >::config(), prev_context } @@ -1203,9 +1184,9 @@ impl CandidateCheckContext { pub(crate) fn verify_backed_candidate( &self, allowed_relay_parents: &AllowedRelayParentsTracker>, - candidate_idx: usize, backed_candidate_receipt: &CommittedCandidateReceipt<::Hash>, - ) -> Result, FailedToCreatePVD>, Error> { + parent_head_data: HeadData, + ) -> Result, Error> { let para_id = backed_candidate_receipt.descriptor().para_id; let relay_parent = backed_candidate_receipt.descriptor().relay_parent; @@ -1218,16 +1199,11 @@ impl CandidateCheckContext { }; { - let persisted_validation_data = match crate::util::make_persisted_validation_data::( - para_id, + let persisted_validation_data = make_persisted_validation_data_with_parent::( relay_parent_number, relay_parent_storage_root, - ) - .defensive_proof("the para is registered") - { - Some(l) => l, - None => return Ok(Err(FailedToCreatePVD)), - }; + parent_head_data, + ); let expected = persisted_validation_data.hash(); @@ -1268,13 +1244,13 @@ impl CandidateCheckContext { ) { log::debug!( target: LOG_TARGET, - "Validation outputs checking during inclusion of a candidate {} for parachain `{}` failed", - candidate_idx, + "Validation outputs checking during inclusion of a candidate {:?} for parachain `{}` failed", + backed_candidate_receipt.hash(), u32::from(para_id), ); Err(err.strip_into_dispatch_err::())?; }; - Ok(Ok(relay_parent_number)) + Ok(relay_parent_number) } /// Check the given outputs after candidate validation on whether it passes the acceptance diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 3fe7d7f0c7d4..97bf67ef934e 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -27,7 +27,7 @@ use crate::{ shared::AllowedRelayParentsTracker, }; use primitives::{ - effective_minimum_backing_votes, SignedAvailabilityBitfields, + effective_minimum_backing_votes, AvailabilityBitfield, SignedAvailabilityBitfields, UncheckedSignedAvailabilityBitfields, }; @@ -360,81 +360,288 @@ fn simple_sanitize_bitfields( } /// Process a set of already sanitized bitfields. 
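Stepping back to the `verify_backed_candidate` change above: the persisted validation data is now assembled from a caller-supplied parent head (the output head of the previous pending candidate) instead of being read from the `paras` pallet, which is why the fallible `FailedToCreatePVD` path could be removed. A rough sketch of that constructor; the field set follows the `PersistedValidationData` primitive, while the `max_pov_size` plumbing here is an assumption:

```rust
#[derive(Debug, PartialEq)]
struct PersistedValidationData {
    parent_head: Vec<u8>,
    relay_parent_number: u32,
    relay_parent_storage_root: [u8; 32],
    max_pov_size: u32,
}

/// Build the PVD purely from inputs the caller already holds; nothing here
/// can fail, unlike the old storage-backed lookup.
fn make_pvd_with_parent(
    relay_parent_number: u32,
    relay_parent_storage_root: [u8; 32],
    parent_head: Vec<u8>,
    max_pov_size: u32,
) -> PersistedValidationData {
    PersistedValidationData { parent_head, relay_parent_number, relay_parent_storage_root, max_pov_size }
}

fn main() {
    let pvd = make_pvd_with_parent(7, [0u8; 32], vec![1, 2, 3], 5 * 1024 * 1024);
    assert_eq!(pvd.parent_head, vec![1, 2, 3]);
}
```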
pub(crate) fn process_bitfields( - expected_bits: usize, signed_bitfields: SignedAvailabilityBitfields, - core_lookup: impl Fn(CoreIndex) -> Option, ) -> Vec<(CoreIndex, CandidateHash)> { let validators = shared::Pallet::::active_validator_keys(); - ParaInclusion::update_pending_availability_and_get_freed_cores::<_>( - expected_bits, + ParaInclusion::update_pending_availability_and_get_freed_cores( &validators[..], signed_bitfields, - core_lookup, ) } #[test] -fn collect_pending_cleans_up_pending() { +fn free_timedout() { let chain_a = ParaId::from(1_u32); let chain_b = ParaId::from(2_u32); - let thread_a = ParaId::from(3_u32); + let chain_c = ParaId::from(3_u32); + let chain_d = ParaId::from(4_u32); + let chain_e = ParaId::from(5_u32); + let chain_f = ParaId::from(6_u32); + let thread_a = ParaId::from(7_u32); let paras = vec![ (chain_a, ParaKind::Parachain), (chain_b, ParaKind::Parachain), + (chain_c, ParaKind::Parachain), + (chain_d, ParaKind::Parachain), + (chain_e, ParaKind::Parachain), + (chain_f, ParaKind::Parachain), (thread_a, ParaKind::Parathread), ]; let mut config = genesis_config(paras); config.configuration.config.scheduler_params.group_rotation_frequency = 3; new_test_ext(config).execute_with(|| { - let default_candidate = TestCandidateBuilder::default().build(); - >::insert( - chain_a, + let timed_out_cores = ParaInclusion::free_timedout(); + assert!(timed_out_cores.is_empty()); + + let make_candidate = |core_index: u32, timed_out: bool| { + let default_candidate = TestCandidateBuilder::default().build(); + let backed_in_number = if timed_out { 0 } else { 5 }; + CandidatePendingAvailability { - core: CoreIndex::from(0), + core: CoreIndex::from(core_index), hash: default_candidate.hash(), descriptor: default_candidate.descriptor.clone(), availability_votes: default_availability_votes(), relay_parent_number: 0, - backed_in_number: 0, + backed_in_number, backers: default_backing_bitfield(), - backing_group: GroupIndex::from(0), - }, - ); - PendingAvailabilityCommitments::::insert( + backing_group: GroupIndex::from(core_index), + commitments: default_candidate.commitments.clone(), + } + }; + + >::insert( chain_a, - default_candidate.commitments.clone(), + [make_candidate(0, true)].into_iter().collect::>(), ); >::insert( &chain_b, + [make_candidate(1, false)].into_iter().collect::>(), + ); + + // 2 chained candidates. The first one is timed out. The other will be evicted also. + let mut c_candidates = VecDeque::new(); + c_candidates.push_back(make_candidate(2, true)); + c_candidates.push_back(make_candidate(3, false)); + + >::insert(&chain_c, c_candidates); + + // 2 chained candidates. All are timed out. + let mut d_candidates = VecDeque::new(); + d_candidates.push_back(make_candidate(4, true)); + d_candidates.push_back(make_candidate(5, true)); + + >::insert(&chain_d, d_candidates); + + // 3 chained candidates. The second one is timed out. The first one will remain in place. + // With the current time out predicate this scenario is impossible. But this is not a + // concern for this module. + let mut e_candidates = VecDeque::new(); + e_candidates.push_back(make_candidate(6, false)); + e_candidates.push_back(make_candidate(7, true)); + e_candidates.push_back(make_candidate(8, false)); + + >::insert(&chain_e, e_candidates); + + // 3 chained candidates, none are timed out. 
+ let mut f_candidates = VecDeque::new(); + f_candidates.push_back(make_candidate(9, false)); + f_candidates.push_back(make_candidate(10, false)); + f_candidates.push_back(make_candidate(11, false)); + + >::insert(&chain_f, f_candidates); + + run_to_block(5, |_| None); + + assert_eq!(>::get(&chain_a).unwrap().len(), 1); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert_eq!(>::get(&chain_c).unwrap().len(), 2); + assert_eq!(>::get(&chain_d).unwrap().len(), 2); + assert_eq!(>::get(&chain_e).unwrap().len(), 3); + assert_eq!(>::get(&chain_f).unwrap().len(), 3); + + let timed_out_cores = ParaInclusion::free_timedout(); + + assert_eq!( + timed_out_cores, + vec![ + CoreIndex(0), + CoreIndex(2), + CoreIndex(3), + CoreIndex(4), + CoreIndex(5), + CoreIndex(7), + CoreIndex(8), + ] + ); + + assert!(>::get(&chain_a).unwrap().is_empty()); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert!(>::get(&chain_c).unwrap().is_empty()); + assert!(>::get(&chain_d).unwrap().is_empty()); + assert_eq!( + >::get(&chain_e) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(6)] + ); + assert_eq!( + >::get(&chain_f) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(9), CoreIndex(10), CoreIndex(11)] + ); + }); +} + +#[test] +fn free_disputed() { + let chain_a = ParaId::from(1_u32); + let chain_b = ParaId::from(2_u32); + let chain_c = ParaId::from(3_u32); + let chain_d = ParaId::from(4_u32); + let chain_e = ParaId::from(5_u32); + let chain_f = ParaId::from(6_u32); + let thread_a = ParaId::from(7_u32); + + let paras = vec![ + (chain_a, ParaKind::Parachain), + (chain_b, ParaKind::Parachain), + (chain_c, ParaKind::Parachain), + (chain_d, ParaKind::Parachain), + (chain_e, ParaKind::Parachain), + (chain_f, ParaKind::Parachain), + (thread_a, ParaKind::Parathread), + ]; + let mut config = genesis_config(paras); + config.configuration.config.scheduler_params.group_rotation_frequency = 3; + new_test_ext(config).execute_with(|| { + let disputed_cores = ParaInclusion::free_disputed(&BTreeSet::new()); + assert!(disputed_cores.is_empty()); + + let disputed_cores = ParaInclusion::free_disputed( + &[CandidateHash::default()].into_iter().collect::>(), + ); + assert!(disputed_cores.is_empty()); + + let make_candidate = |core_index: u32| { + let default_candidate = TestCandidateBuilder::default().build(); + CandidatePendingAvailability { - core: CoreIndex::from(1), - hash: default_candidate.hash(), - descriptor: default_candidate.descriptor, + core: CoreIndex::from(core_index), + hash: CandidateHash(Hash::from_low_u64_be(core_index as _)), + descriptor: default_candidate.descriptor.clone(), availability_votes: default_availability_votes(), relay_parent_number: 0, - backed_in_number: 5, + backed_in_number: 0, backers: default_backing_bitfield(), - backing_group: GroupIndex::from(1), - }, + backing_group: GroupIndex::from(core_index), + commitments: default_candidate.commitments.clone(), + } + }; + + // Disputed + >::insert( + chain_a, + [make_candidate(0)].into_iter().collect::>(), + ); + + // Not disputed. + >::insert( + &chain_b, + [make_candidate(1)].into_iter().collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_b, default_candidate.commitments); + + // 2 chained candidates. The first one is disputed. The other will be evicted also. + let mut c_candidates = VecDeque::new(); + c_candidates.push_back(make_candidate(2)); + c_candidates.push_back(make_candidate(3)); + + >::insert(&chain_c, c_candidates); + + // 2 chained candidates. All are disputed. 
+ let mut d_candidates = VecDeque::new(); + d_candidates.push_back(make_candidate(4)); + d_candidates.push_back(make_candidate(5)); + + >::insert(&chain_d, d_candidates); + + // 3 chained candidates. The second one is disputed. The first one will remain in place. + let mut e_candidates = VecDeque::new(); + e_candidates.push_back(make_candidate(6)); + e_candidates.push_back(make_candidate(7)); + e_candidates.push_back(make_candidate(8)); + + >::insert(&chain_e, e_candidates); + + // 3 chained candidates, none are disputed. + let mut f_candidates = VecDeque::new(); + f_candidates.push_back(make_candidate(9)); + f_candidates.push_back(make_candidate(10)); + f_candidates.push_back(make_candidate(11)); + + >::insert(&chain_f, f_candidates); run_to_block(5, |_| None); - assert!(>::get(&chain_a).is_some()); - assert!(>::get(&chain_b).is_some()); - assert!(>::get(&chain_a).is_some()); - assert!(>::get(&chain_b).is_some()); + assert_eq!(>::get(&chain_a).unwrap().len(), 1); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert_eq!(>::get(&chain_c).unwrap().len(), 2); + assert_eq!(>::get(&chain_d).unwrap().len(), 2); + assert_eq!(>::get(&chain_e).unwrap().len(), 3); + assert_eq!(>::get(&chain_f).unwrap().len(), 3); + + let disputed_candidates = [ + CandidateHash(Hash::from_low_u64_be(0)), + CandidateHash(Hash::from_low_u64_be(2)), + CandidateHash(Hash::from_low_u64_be(4)), + CandidateHash(Hash::from_low_u64_be(5)), + CandidateHash(Hash::from_low_u64_be(7)), + ] + .into_iter() + .collect::>(); + let disputed_cores = ParaInclusion::free_disputed(&disputed_candidates); - ParaInclusion::collect_pending(Scheduler::availability_timeout_predicate()); + assert_eq!( + disputed_cores.into_iter().map(|(core, _)| core).collect::>(), + vec![ + CoreIndex(0), + CoreIndex(2), + CoreIndex(3), + CoreIndex(4), + CoreIndex(5), + CoreIndex(7), + CoreIndex(8), + ] + ); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_some()); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_some()); + assert!(>::get(&chain_a).unwrap().is_empty()); + assert_eq!(>::get(&chain_b).unwrap().len(), 1); + assert!(>::get(&chain_c).unwrap().is_empty()); + assert!(>::get(&chain_d).unwrap().is_empty()); + assert_eq!( + >::get(&chain_e) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(6)] + ); + assert_eq!( + >::get(&chain_f) + .unwrap() + .into_iter() + .map(|c| c.core) + .collect::>(), + vec![CoreIndex(9), CoreIndex(10), CoreIndex(11)] + ); }); } @@ -474,14 +681,6 @@ fn bitfield_checks() { let signing_context = SigningContext { parent_hash: System::parent_hash(), session_index: 5 }; - let core_lookup = |core| match core { - core if core == CoreIndex::from(0) => Some(chain_a), - core if core == CoreIndex::from(1) => Some(chain_b), - core if core == CoreIndex::from(2) => Some(thread_a), - core if core == CoreIndex::from(3) => None, // for the expected_cores() + 1 test below. 
- _ => panic!("out of bounds for testing"), - }; - // too many bits in bitfield { let mut bare_bitfield = default_bitfield(); @@ -550,7 +749,7 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let x = process_bitfields(checked_bitfields); assert!(x.is_empty(), "No core should be freed."); } @@ -571,7 +770,7 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let x = process_bitfields(checked_bitfields); assert!(x.is_empty(), "No core should be freed."); } @@ -579,12 +778,10 @@ fn bitfield_checks() { { let mut bare_bitfield = default_bitfield(); - assert_eq!(core_lookup(CoreIndex::from(0)), Some(chain_a)); - let default_candidate = TestCandidateBuilder::default().build(); >::insert( chain_a, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(0), hash: default_candidate.hash(), descriptor: default_candidate.descriptor, @@ -593,9 +790,11 @@ fn bitfield_checks() { backed_in_number: 0, backers: default_backing_bitfield(), backing_group: GroupIndex::from(0), - }, + commitments: default_candidate.commitments, + }] + .into_iter() + .collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_a, default_candidate.commitments); *bare_bitfield.0.get_mut(0).unwrap() = true; let signed = sign_bitfield( @@ -613,53 +812,10 @@ fn bitfield_checks() { ); assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let x = process_bitfields(checked_bitfields); assert!(x.is_empty(), "No core should be freed."); >::remove(chain_a); - PendingAvailabilityCommitments::::remove(chain_a); - } - - // bitfield signed with pending bit signed, but no commitments. 
- { - let mut bare_bitfield = default_bitfield(); - - assert_eq!(core_lookup(CoreIndex::from(0)), Some(chain_a)); - - let default_candidate = TestCandidateBuilder::default().build(); - >::insert( - chain_a, - CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: default_candidate.hash(), - descriptor: default_candidate.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: 0, - backed_in_number: 0, - backers: default_backing_bitfield(), - backing_group: GroupIndex::from(0), - }, - ); - - *bare_bitfield.0.get_mut(0).unwrap() = true; - let signed = sign_bitfield( - &keystore, - &validators[0], - ValidatorIndex(0), - bare_bitfield, - &signing_context, - ); - - let checked_bitfields = simple_sanitize_bitfields( - vec![signed.into()], - DisputedBitfield::zeros(expected_bits()), - expected_bits(), - ); - assert_eq!(checked_bitfields.len(), 1, "No bitfields should have been filtered!"); - - let x = process_bitfields(expected_bits(), checked_bitfields, core_lookup); - // no core is freed - assert!(x.is_empty(), "No core should be freed."); } }); } @@ -673,13 +829,17 @@ fn availability_threshold_is_supermajority() { #[test] fn supermajority_bitfields_trigger_availability() { - let chain_a = ParaId::from(1_u32); - let chain_b = ParaId::from(2_u32); - let thread_a = ParaId::from(3_u32); + let chain_a = ParaId::from(0_u32); + let chain_b = ParaId::from(1_u32); + let chain_c = ParaId::from(2_u32); + let chain_d = ParaId::from(3_u32); + let thread_a = ParaId::from(4_u32); let paras = vec![ (chain_a, ParaKind::Parachain), (chain_b, ParaKind::Parachain), + (chain_c, ParaKind::Parachain), + (chain_d, ParaKind::Parachain), (thread_a, ParaKind::Parathread), ]; let validators = vec![ @@ -688,6 +848,8 @@ fn supermajority_bitfields_trigger_availability() { Sr25519Keyring::Charlie, Sr25519Keyring::Dave, Sr25519Keyring::Ferdie, + Sr25519Keyring::One, + Sr25519Keyring::Two, ]; let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); for validator in validators.iter() { @@ -707,13 +869,7 @@ fn supermajority_bitfields_trigger_availability() { let signing_context = SigningContext { parent_hash: System::parent_hash(), session_index: 5 }; - let core_lookup = |core| match core { - core if core == CoreIndex::from(0) => Some(chain_a), - core if core == CoreIndex::from(1) => Some(chain_b), - core if core == CoreIndex::from(2) => Some(thread_a), - _ => panic!("Core out of bounds for 2 parachains and 1 parathread core."), - }; - + // Chain A only has one candidate pending availability. It will be made available now. let candidate_a = TestCandidateBuilder { para_id: chain_a, head_data: vec![1, 2, 3, 4].into(), @@ -723,7 +879,7 @@ fn supermajority_bitfields_trigger_availability() { >::insert( chain_a, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(0), hash: candidate_a.hash(), descriptor: candidate_a.clone().descriptor, @@ -732,10 +888,13 @@ fn supermajority_bitfields_trigger_availability() { backed_in_number: 0, backers: backing_bitfield(&[3, 4]), backing_group: GroupIndex::from(0), - }, + commitments: candidate_a.clone().commitments, + }] + .into_iter() + .collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_a, candidate_a.clone().commitments); + // Chain B only has one candidate pending availability. It won't be made available now. 
let candidate_b = TestCandidateBuilder { para_id: chain_b, head_data: vec![5, 6, 7, 8].into(), @@ -745,7 +904,7 @@ >::insert( chain_b, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(1), hash: candidate_b.hash(), descriptor: candidate_b.descriptor, @@ -754,40 +913,99 @@ backed_in_number: 0, backers: backing_bitfield(&[0, 2]), backing_group: GroupIndex::from(1), - }, + commitments: candidate_b.commitments, + }] + .into_iter() + .collect::>(), ); - PendingAvailabilityCommitments::::insert(chain_b, candidate_b.commitments); - // this bitfield signals that a and b are available. - let a_and_b_available = { - let mut bare_bitfield = default_bitfield(); - *bare_bitfield.0.get_mut(0).unwrap() = true; - *bare_bitfield.0.get_mut(1).unwrap() = true; + // Chain C has three candidates pending availability. The first and third candidates will be + // made available. Only the first candidate will be evicted from the core and enacted. + let candidate_c_1 = TestCandidateBuilder { + para_id: chain_c, + head_data: vec![7, 8].into(), + ..Default::default() + } + .build(); + let candidate_c_2 = TestCandidateBuilder { + para_id: chain_c, + head_data: vec![9, 10].into(), + ..Default::default() + } + .build(); + let candidate_c_3 = TestCandidateBuilder { + para_id: chain_c, + head_data: vec![11, 12].into(), + ..Default::default() + } + .build(); - bare_bitfield - }; + let mut c_candidates = VecDeque::new(); + c_candidates.push_back(CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_c_1.hash(), + descriptor: candidate_c_1.descriptor.clone(), + availability_votes: default_availability_votes(), + relay_parent_number: 0, + backed_in_number: 0, + backers: backing_bitfield(&[1]), + backing_group: GroupIndex::from(2), + commitments: candidate_c_1.commitments.clone(), + }); + c_candidates.push_back(CandidatePendingAvailability { + core: CoreIndex::from(3), + hash: candidate_c_2.hash(), + descriptor: candidate_c_2.descriptor.clone(), + availability_votes: default_availability_votes(), + relay_parent_number: 0, + backed_in_number: 0, + backers: backing_bitfield(&[5]), + backing_group: GroupIndex::from(3), + commitments: candidate_c_2.commitments.clone(), + }); + c_candidates.push_back(CandidatePendingAvailability { + core: CoreIndex::from(4), + hash: candidate_c_3.hash(), + descriptor: candidate_c_3.descriptor.clone(), + availability_votes: default_availability_votes(), + relay_parent_number: 0, + backed_in_number: 0, + backers: backing_bitfield(&[6]), + backing_group: GroupIndex::from(4), + commitments: candidate_c_3.commitments.clone(), + }); - // this bitfield signals that only a is available. - let a_available = { + >::insert(chain_c, c_candidates); + + // this bitfield signals that all candidates are available.
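+ // The bitfield bits are indexed by core: 0 = chain A, 1 = chain B, 2, 3 and 4 = chain C's three candidates.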
+ let all_available = { let mut bare_bitfield = default_bitfield(); - *bare_bitfield.0.get_mut(0).unwrap() = true; + for bit in 0..=4 { + *bare_bitfield.0.get_mut(bit).unwrap() = true; + } bare_bitfield }; let threshold = availability_threshold(validators.len()); - // 4 of 5 first value >= 2/3 - assert_eq!(threshold, 4); + // 5 of 7 is the first value >= 2/3 + assert_eq!(threshold, 5); let signed_bitfields = validators .iter() .enumerate() .filter_map(|(i, key)| { - let to_sign = if i < 3 { - a_and_b_available.clone() - } else if i < 4 { - a_available.clone() + let to_sign = if i < 4 { + all_available.clone() + } else if i < 5 { + // this bitfield signals that only a, c1 and c3 are available. + let mut bare_bitfield = default_bitfield(); + *bare_bitfield.0.get_mut(0).unwrap() = true; + *bare_bitfield.0.get_mut(2).unwrap() = true; + *bare_bitfield.0.get_mut(4).unwrap() = true; + + bare_bitfield } else { // sign nothing. return None @@ -814,46 +1032,129 @@ ); assert_eq!(checked_bitfields.len(), old_len, "No bitfields should have been filtered!"); - // only chain A's core is freed. - let v = process_bitfields(expected_bits(), checked_bitfields, core_lookup); - assert_eq!(vec![(CoreIndex(0), candidate_a.hash())], v); - - // chain A had 4 signing off, which is >= threshold. - // chain B has 3 signing off, which is < threshold. - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_some()); - assert_eq!(>::get(&chain_b).unwrap().availability_votes, { - // check that votes from first 3 were tracked. + // only chain A's core and candidate C1's core are freed. + let v = process_bitfields(checked_bitfields); + assert_eq!( + vec![(CoreIndex(2), candidate_c_1.hash()), (CoreIndex(0), candidate_a.hash())], + v + ); + let votes = |bits: &[usize]| { let mut votes = default_availability_votes(); - *votes.get_mut(0).unwrap() = true; - *votes.get_mut(1).unwrap() = true; - *votes.get_mut(2).unwrap() = true; + for bit in bits { + *votes.get_mut(*bit).unwrap() = true; + } votes - }); + }; - // and check that chain head was enacted. + assert!(>::get(&chain_a).unwrap().is_empty()); + assert_eq!( + >::get(&chain_b) + .unwrap() + .pop_front() + .unwrap() + .availability_votes, + votes(&[0, 1, 2, 3]) + ); + let mut pending_c = >::get(&chain_c).unwrap(); + assert_eq!(pending_c.pop_front().unwrap().availability_votes, votes(&[0, 1, 2, 3])); + assert_eq!(pending_c.pop_front().unwrap().availability_votes, votes(&[0, 1, 2, 3, 4])); + assert!(pending_c.is_empty()); + + // and check that the chain heads were enacted. + assert_eq!(Paras::para_head(&chain_a), Some(vec![1, 2, 3, 4].into())); + assert_ne!(Paras::para_head(&chain_b), Some(vec![5, 6, 7, 8].into())); + assert_eq!(Paras::para_head(&chain_c), Some(vec![7, 8].into())); // Check that rewards are applied.
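+ // Each of the 5 signing validators is rewarded once per enacted candidate that its bitfield covered (A and C1 here).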
{ let rewards = crate::mock::availability_rewards(); - assert_eq!(rewards.len(), 4); - assert_eq!(rewards.get(&ValidatorIndex(0)).unwrap(), &1); - assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &1); - assert_eq!(rewards.get(&ValidatorIndex(2)).unwrap(), &1); + assert_eq!(rewards.len(), 5); + assert_eq!(rewards.get(&ValidatorIndex(0)).unwrap(), &2); + assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &2); + assert_eq!(rewards.get(&ValidatorIndex(2)).unwrap(), &2); + assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &2); + assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &2); + } + + { + let rewards = crate::mock::backing_rewards(); + + assert_eq!(rewards.len(), 3); assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &1); + } + + // Add a new bitfield which will make candidate C2 available also. This will also evict and + // enact C3. + let signed_bitfields = vec![sign_bitfield( + &keystore, + &validators[5], + ValidatorIndex(5), + { + let mut bare_bitfield = default_bitfield(); + *bare_bitfield.0.get_mut(3).unwrap() = true; + bare_bitfield + }, + &signing_context, + ) + .into()]; + + let old_len = signed_bitfields.len(); + let checked_bitfields = simple_sanitize_bitfields( + signed_bitfields, + DisputedBitfield::zeros(expected_bits()), + expected_bits(), + ); + assert_eq!(checked_bitfields.len(), old_len, "No bitfields should have been filtered!"); + + let v = process_bitfields(checked_bitfields); + assert_eq!( + vec![(CoreIndex(3), candidate_c_2.hash()), (CoreIndex(4), candidate_c_3.hash())], + v + ); + + assert!(>::get(&chain_a).unwrap().is_empty()); + assert_eq!( + >::get(&chain_b) + .unwrap() + .pop_front() + .unwrap() + .availability_votes, + votes(&[0, 1, 2, 3]) + ); + assert!(>::get(&chain_c).unwrap().is_empty()); + + // and check that the chain heads were enacted. + assert_eq!(Paras::para_head(&chain_a), Some(vec![1, 2, 3, 4].into())); + assert_ne!(Paras::para_head(&chain_b), Some(vec![5, 6, 7, 8].into())); + assert_eq!(Paras::para_head(&chain_c), Some(vec![11, 12].into())); + + // Check that rewards are applied.
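+ // Validators 0..=3 voted for all 4 enacted candidates, validator 4 for 3 of them (A, C1, C3) and validator 5 only for C2.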
+ { + let rewards = crate::mock::availability_rewards(); + + assert_eq!(rewards.len(), 6); + assert_eq!(rewards.get(&ValidatorIndex(0)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(2)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &4); + assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &3); + assert_eq!(rewards.get(&ValidatorIndex(5)).unwrap(), &1); } { let rewards = crate::mock::backing_rewards(); - assert_eq!(rewards.len(), 2); + assert_eq!(rewards.len(), 5); assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &1); assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(5)).unwrap(), &1); + assert_eq!(rewards.get(&ValidatorIndex(6)).unwrap(), &1); } }); } @@ -878,6 +1179,7 @@ fn candidate_checks() { Sr25519Keyring::Charlie, Sr25519Keyring::Dave, Sr25519Keyring::Ferdie, + Sr25519Keyring::One, ]; let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); for validator in validators.iter() { @@ -904,7 +1206,8 @@ fn candidate_checks() { group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1]), group_index if group_index == GroupIndex::from(1) => Some(vec![2, 3]), group_index if group_index == GroupIndex::from(2) => Some(vec![4]), - _ => panic!("Group index out of bounds for 2 parachains and 1 parathread core"), + group_index if group_index == GroupIndex::from(3) => Some(vec![5]), + _ => panic!("Group index out of bounds"), } .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) }; @@ -914,12 +1217,12 @@ fn candidate_checks() { vec![ValidatorIndex(0), ValidatorIndex(1)], vec![ValidatorIndex(2), ValidatorIndex(3)], vec![ValidatorIndex(4)], + vec![ValidatorIndex(5)], ]; Scheduler::set_validator_groups(validator_groups); let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); let chain_a_assignment = (chain_a, CoreIndex::from(0)); - let chain_b_assignment = (chain_b, CoreIndex::from(1)); let thread_a_assignment = (thread_a, CoreIndex::from(2)); @@ -929,14 +1232,14 @@ fn candidate_checks() { assert_eq!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![], + &BTreeMap::new(), &group_validators, false ), Ok(ProcessedCandidates::default()) ); - // candidates out of order. + // Check candidate ordering { let mut candidate_a = TestCandidateBuilder { para_id: chain_a, @@ -947,19 +1250,37 @@ fn candidate_checks() { ..Default::default() } .build(); - let mut candidate_b = TestCandidateBuilder { + let mut candidate_b_1 = TestCandidateBuilder { para_id: chain_b, relay_parent: System::parent_hash(), pov_hash: Hash::repeat_byte(2), persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![1, 2, 3]), ..Default::default() } .build(); - collator_sign_candidate(Sr25519Keyring::One, &mut candidate_a); + // Make candidate b2 a child of b1. 
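+ // Its persisted validation data commits to b1's head data as the parent head data.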
+ let mut candidate_b_2 = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(3), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::( + RELAY_PARENT_NUM, + Default::default(), + candidate_b_1.commitments.head_data.clone(), + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![5, 6, 7]), + ..Default::default() + } + .build(); - collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_a); + collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b_1); + collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b_2); let backed_a = back_candidate( candidate_a, @@ -971,8 +1292,18 @@ fn candidate_checks() { None, ); - let backed_b = back_candidate( - candidate_b, + let backed_b_1 = back_candidate( + candidate_b_1.clone(), + &validators, + group_validators(GroupIndex::from(2)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + let backed_b_2 = back_candidate( + candidate_b_2, &validators, group_validators(GroupIndex::from(1)).unwrap().as_ref(), &keystore, @@ -981,15 +1312,82 @@ fn candidate_checks() { None, ); - // out-of-order manifests as unscheduled. + // candidates are required to be sorted in dependency order. assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_b, chain_b_assignment.1), (backed_a, chain_a_assignment.1)], + &vec![( + chain_b, + vec![ + (backed_b_2.clone(), CoreIndex(1)), + (backed_b_1.clone(), CoreIndex(2)) + ] + ),] + .into_iter() + .collect(), &group_validators, false ), - Error::::ScheduledOutOfOrder + Error::::ValidationDataHashMismatch + ); + + // candidates are no longer required to be sorted by core index. + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![ + ( + chain_b, + vec![ + (backed_b_1.clone(), CoreIndex(2)), + (backed_b_2.clone(), CoreIndex(1)), + ], + ), + (chain_a_assignment.0, vec![(backed_a.clone(), chain_a_assignment.1)]), + ] + .into_iter() + .collect(), + &group_validators, + false, + ) + .unwrap(); + + // candidate does not build on top of the latest unincluded head + + let mut candidate_b_3 = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(4), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::( + RELAY_PARENT_NUM, + Default::default(), + candidate_b_1.commitments.head_data.clone(), + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![8, 9]), + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b_3); + + let backed_b_3 = back_candidate( + candidate_b_3, + &validators, + group_validators(GroupIndex::from(3)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + assert_noop!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![(chain_b, vec![(backed_b_3, CoreIndex(3))])].into_iter().collect(), + &group_validators, + false + ), + Error::::ValidationDataHashMismatch ); } @@ -1006,8 +1404,9 @@ fn candidate_checks() { .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + // Insufficient backing. 
let backed = back_candidate( - candidate, + candidate.clone(), &validators, group_validators(GroupIndex::from(0)).unwrap().as_ref(), &keystore, @@ -1019,12 +1418,37 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), Error::::InsufficientBacking ); + + // Wrong backing group. + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(1)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + assert_noop!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), + &group_validators, + false + ), + Error::::InvalidBacking + ); } // one of candidates is not based on allowed relay parent. @@ -1078,7 +1502,12 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_b, chain_b_assignment.1), (backed_a, chain_a_assignment.1)], + &vec![ + (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), + (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]) + ] + .into_iter() + .collect(), &group_validators, false ), @@ -1117,7 +1546,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, thread_a_assignment.1)], + &vec![(thread_a_assignment.0, vec![(backed, thread_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1125,100 +1556,6 @@ fn candidate_checks() { ); } - // para occupied - reject. - { - let mut candidate = TestCandidateBuilder { - para_id: chain_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(1), - persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - - let backed = back_candidate( - candidate, - &validators, - group_validators(GroupIndex::from(0)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - None, - ); - - let candidate = TestCandidateBuilder::default().build(); - >::insert( - &chain_a, - CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate.hash(), - descriptor: candidate.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: 3, - backed_in_number: 4, - backers: default_backing_bitfield(), - backing_group: GroupIndex::from(0), - }, - ); - >::insert(&chain_a, candidate.commitments); - - assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], - &group_validators, - false - ), - Error::::CandidateScheduledBeforeParaFree - ); - - >::remove(&chain_a); - >::remove(&chain_a); - } - - // messed up commitments storage - do not panic - reject. 
- { - let mut candidate = TestCandidateBuilder { - para_id: chain_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(1), - persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - - // this is not supposed to happen - >::insert(&chain_a, candidate.commitments.clone()); - - let backed = back_candidate( - candidate, - &validators, - group_validators(GroupIndex::from(0)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - None, - ); - - assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], - &group_validators, - false - ), - Error::::CandidateScheduledBeforeParaFree - ); - - >::remove(&chain_a); - } - // interfering code upgrade - reject { let mut candidate = TestCandidateBuilder { @@ -1253,14 +1590,16 @@ fn candidate_checks() { vec![9, 8, 7, 6, 5, 4, 3, 2, 1].into(), expected_at, &cfg, - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); } assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1292,14 +1631,16 @@ fn candidate_checks() { None, ); - assert_eq!( + assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, - false + false, ), - Err(Error::::ValidationDataHashMismatch.into()), + Error::::ValidationDataHashMismatch ); } @@ -1331,7 +1672,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1367,7 +1710,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed, chain_a_assignment.1)])] + .into_iter() + .collect(), &group_validators, false ), @@ -1506,16 +1851,21 @@ fn backing_works() { ); let backed_candidates = vec![ - (backed_a.clone(), chain_a_assignment.1), - (backed_b.clone(), chain_b_assignment.1), - (backed_c, thread_a_assignment.1), - ]; + (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]), + (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), + (thread_a_assignment.0, vec![(backed_c, thread_a_assignment.1)]), + ] + .into_iter() + .collect::>(); + let get_backing_group_idx = { // the order defines the group implicitly for this test case let backed_candidates_with_groups = backed_candidates - .iter() + .values() .enumerate() - .map(|(idx, (backed_candidate, _))| (backed_candidate.hash(), GroupIndex(idx as _))) + .map(|(idx, backed_candidates)| { + (backed_candidates.iter().next().unwrap().0.hash(), GroupIndex(idx as _)) + }) .collect::>(); move |candidate_hash_x: CandidateHash| -> Option { @@ -1534,7 +1884,7 @@ fn backing_works() { candidate_receipt_with_backing_validator_indices, } = ParaInclusion::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), + &backed_candidates, &group_validators, false, ) @@ -1555,7 +1905,8 @@ fn backing_works() { CandidateHash, 
(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), >::new(); - backed_candidates.into_iter().for_each(|(backed_candidate, _)| { + backed_candidates.values().for_each(|backed_candidates| { + let backed_candidate = backed_candidates.iter().next().unwrap().0.clone(); let candidate_receipt_with_backers = intermediate .entry(backed_candidate.hash()) .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); @@ -1606,20 +1957,21 @@ fn backing_works() { }; assert_eq!( >::get(&chain_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate_a.hash(), - descriptor: candidate_a.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(0), - }) - ); - assert_eq!( - >::get(&chain_a), - Some(candidate_a.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + commitments: candidate_a.commitments, + }] + .into_iter() + .collect::>() + ) ); let backers = { @@ -1631,38 +1983,40 @@ fn backing_works() { }; assert_eq!( >::get(&chain_b), - Some(CandidatePendingAvailability { - core: CoreIndex::from(1), - hash: candidate_b.hash(), - descriptor: candidate_b.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(1), - }) - ); - assert_eq!( - >::get(&chain_b), - Some(candidate_b.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(1), + hash: candidate_b.hash(), + descriptor: candidate_b.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(1), + commitments: candidate_b.commitments, + }] + .into_iter() + .collect::>() + ) ); assert_eq!( >::get(&thread_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(2), - hash: candidate_c.hash(), - descriptor: candidate_c.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers: backing_bitfield(&[4]), - backing_group: GroupIndex::from(2), - }) - ); - assert_eq!( - >::get(&thread_a), - Some(candidate_c.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_c.hash(), + descriptor: candidate_c.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[4]), + backing_group: GroupIndex::from(2), + commitments: candidate_c.commitments + }] + .into_iter() + .collect::>() + ) ); }); } @@ -1750,11 +2104,17 @@ fn backing_works_with_elastic_scaling_mvp() { .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate_b_1); + // Make candidate b2 a child of b1. 
let mut candidate_b_2 = TestCandidateBuilder { para_id: chain_b, relay_parent: System::parent_hash(), pov_hash: Hash::repeat_byte(3), - persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::( + RELAY_PARENT_NUM, + Default::default(), + candidate_b_1.commitments.head_data.clone(), + ) + .hash(), hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() } @@ -1791,18 +2151,25 @@ Some(CoreIndex(2)), ); - let backed_candidates = vec![ - (backed_a.clone(), CoreIndex(0)), - (backed_b_1.clone(), CoreIndex(1)), - (backed_b_2.clone(), CoreIndex(2)), - ]; + let mut backed_candidates = BTreeMap::new(); + backed_candidates.insert(chain_a, vec![(backed_a, CoreIndex(0))]); + backed_candidates + .insert(chain_b, vec![(backed_b_1, CoreIndex(1)), (backed_b_2, CoreIndex(2))]); + let get_backing_group_idx = { // the order defines the group implicitly for this test case let backed_candidates_with_groups = backed_candidates - .iter() + .values() .enumerate() - .map(|(idx, (backed_candidate, _))| (backed_candidate.hash(), GroupIndex(idx as _))) - .collect::>(); + .map(|(idx, backed_candidates)| { + backed_candidates + .iter() + .enumerate() + .map(|(i, c)| (c.0.hash(), GroupIndex((idx + i) as _))) + .collect() + }) + .collect::>>() + .concat(); move |candidate_hash_x: CandidateHash| -> Option { backed_candidates_with_groups.iter().find_map(|(candidate_hash, grp)| { @@ -1820,14 +2187,13 @@ candidate_receipt_with_backing_validator_indices, } = ParaInclusion::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), + &backed_candidates, &group_validators, true, ) .expect("candidates scheduled, in order, and backed"); - // Both b candidates will be backed. However, only one will be recorded on-chain and proceed - // with being made available. + // Both b candidates will be backed.
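+ // With elastic scaling, chain B occupies two cores (1 and 2) within the same relay-chain block.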
assert_eq!( occupied_cores, vec![ @@ -1842,26 +2208,29 @@ fn backing_works_with_elastic_scaling_mvp() { CandidateHash, (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), >::new(); - backed_candidates.into_iter().for_each(|(backed_candidate, _)| { - let candidate_receipt_with_backers = expected - .entry(backed_candidate.hash()) - .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); - let (validator_indices, _maybe_core_index) = - backed_candidate.validator_indices_and_core_index(true); - assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones()); - candidate_receipt_with_backers.1.extend( - validator_indices - .iter() - .enumerate() - .filter(|(_, signed)| **signed) - .zip(backed_candidate.validity_votes().iter().cloned()) - .filter_map(|((validator_index_within_group, _), attestation)| { - let grp_idx = get_backing_group_idx(backed_candidate.hash()).unwrap(); - group_validators(grp_idx).map(|validator_indices| { - (validator_indices[validator_index_within_group], attestation) - }) - }), - ); + backed_candidates.values().for_each(|backed_candidates| { + for backed_candidate in backed_candidates { + let backed_candidate = backed_candidate.0.clone(); + let candidate_receipt_with_backers = expected + .entry(backed_candidate.hash()) + .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); + let (validator_indices, _maybe_core_index) = + backed_candidate.validator_indices_and_core_index(true); + assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones()); + candidate_receipt_with_backers.1.extend( + validator_indices + .iter() + .enumerate() + .filter(|(_, signed)| **signed) + .zip(backed_candidate.validity_votes().iter().cloned()) + .filter_map(|((validator_index_within_group, _), attestation)| { + let grp_idx = get_backing_group_idx(backed_candidate.hash()).unwrap(); + group_validators(grp_idx).map(|validator_indices| { + (validator_indices[validator_index_within_group], attestation) + }) + }), + ); + } }); assert_eq!( @@ -1881,39 +2250,54 @@ fn backing_works_with_elastic_scaling_mvp() { }; assert_eq!( >::get(&chain_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate_a.hash(), - descriptor: candidate_a.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(0), - }) - ); - assert_eq!( - >::get(&chain_a), - Some(candidate_a.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + commitments: candidate_a.commitments + }] + .into_iter() + .collect::>() + ) ); - // Only one candidate for b will be recorded on chain. + // Both candidates of b will be recorded on chain. 
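+ // The pending availability queue stores them in dependency order: b1 (core 1) before b2 (core 2).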
assert_eq!( >::get(&chain_b), - Some(CandidatePendingAvailability { - core: CoreIndex::from(2), - hash: candidate_b_2.hash(), - descriptor: candidate_b_2.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers: backing_bitfield(&[4]), - backing_group: GroupIndex::from(2), - }) - ); - assert_eq!( - >::get(&chain_b), - Some(candidate_b_2.commitments), + Some( + [ + CandidatePendingAvailability { + core: CoreIndex::from(1), + hash: candidate_b_1.hash(), + descriptor: candidate_b_1.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[2, 3]), + backing_group: GroupIndex::from(1), + commitments: candidate_b_1.commitments + }, + CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_b_2.hash(), + descriptor: candidate_b_2.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[4]), + backing_group: GroupIndex::from(2), + commitments: candidate_b_2.commitments + } + ] + .into_iter() + .collect::>() + ) ); }); } @@ -1998,8 +2382,10 @@ fn can_include_candidate_with_ok_code_upgrade() { let ProcessedCandidates { core_indices: occupied_cores, .. } = ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_a, chain_a_assignment.1)], - &group_validators, + &vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])] + .into_iter() + .collect::>(), + group_validators, false, ) .expect("candidates scheduled, in order, and backed"); @@ -2015,20 +2401,21 @@ fn can_include_candidate_with_ok_code_upgrade() { }; assert_eq!( >::get(&chain_a), - Some(CandidatePendingAvailability { - core: CoreIndex::from(0), - hash: candidate_a.hash(), - descriptor: candidate_a.descriptor, - availability_votes: default_availability_votes(), - relay_parent_number: System::block_number() - 1, - backed_in_number: System::block_number(), - backers, - backing_group: GroupIndex::from(0), - }) - ); - assert_eq!( - >::get(&chain_a), - Some(candidate_a.commitments), + Some( + [CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + commitments: candidate_a.commitments + }] + .into_iter() + .collect::>() + ) ); }); } @@ -2209,14 +2596,16 @@ fn check_allowed_relay_parents() { ); let backed_candidates = vec![ - (backed_a, chain_a_assignment.1), - (backed_b, chain_b_assignment.1), - (backed_c, thread_a_assignment.1), - ]; + (chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)]), + (chain_b_assignment.0, vec![(backed_b, chain_b_assignment.1)]), + (thread_a_assignment.0, vec![(backed_c, thread_a_assignment.1)]), + ] + .into_iter() + .collect::>(); ParaInclusion::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), + &backed_candidates, &group_validators, false, ) @@ -2264,25 +2653,10 @@ fn session_change_wipes() { run_to_block(10, |_| None); - >::insert( - &ValidatorIndex(0), - AvailabilityBitfieldRecord { bitfield: default_bitfield(), submitted_at: 9 }, - ); - - >::insert( - &ValidatorIndex(1), - AvailabilityBitfieldRecord { 
bitfield: default_bitfield(), submitted_at: 9 }, - ); - - >::insert( - &ValidatorIndex(4), - AvailabilityBitfieldRecord { bitfield: default_bitfield(), submitted_at: 9 }, - ); - let candidate = TestCandidateBuilder::default().build(); >::insert( &chain_a, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(0), hash: candidate.hash(), descriptor: candidate.descriptor.clone(), @@ -2291,13 +2665,15 @@ fn session_change_wipes() { backed_in_number: 6, backers: default_backing_bitfield(), backing_group: GroupIndex::from(0), - }, + commitments: candidate.commitments.clone(), + }] + .into_iter() + .collect::>(), ); - >::insert(&chain_a, candidate.commitments.clone()); >::insert( &chain_b, - CandidatePendingAvailability { + [CandidatePendingAvailability { core: CoreIndex::from(1), hash: candidate.hash(), descriptor: candidate.descriptor, @@ -2306,22 +2682,18 @@ fn session_change_wipes() { backed_in_number: 7, backers: default_backing_bitfield(), backing_group: GroupIndex::from(1), - }, + commitments: candidate.commitments, + }] + .into_iter() + .collect::>(), ); - >::insert(&chain_b, candidate.commitments); run_to_block(11, |_| None); assert_eq!(shared::Pallet::::session_index(), 5); - assert!(>::get(&ValidatorIndex(0)).is_some()); - assert!(>::get(&ValidatorIndex(1)).is_some()); - assert!(>::get(&ValidatorIndex(4)).is_some()); - assert!(>::get(&chain_a).is_some()); assert!(>::get(&chain_b).is_some()); - assert!(>::get(&chain_a).is_some()); - assert!(>::get(&chain_b).is_some()); run_to_block(12, |n| match n { 12 => Some(SessionChangeNotification { @@ -2337,18 +2709,7 @@ fn session_change_wipes() { assert_eq!(shared::Pallet::::session_index(), 6); - assert!(>::get(&ValidatorIndex(0)).is_none()); - assert!(>::get(&ValidatorIndex(1)).is_none()); - assert!(>::get(&ValidatorIndex(4)).is_none()); - - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_none()); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_b).is_none()); - - assert!(>::iter().collect::>().is_empty()); assert!(>::iter().collect::>().is_empty()); - assert!(>::iter().collect::>().is_empty()); }); } @@ -2420,11 +2781,6 @@ fn para_upgrade_delay_scheduled_from_inclusion() { ]]; Scheduler::set_validator_groups(validator_groups); - let core_lookup = |core| match core { - core if core == CoreIndex::from(0) => Some(chain_a), - _ => None, - }; - let allowed_relay_parents = default_allowed_relay_parent_tracker(); let chain_a_assignment = (chain_a, CoreIndex::from(0)); @@ -2453,7 +2809,9 @@ fn para_upgrade_delay_scheduled_from_inclusion() { let ProcessedCandidates { core_indices: occupied_cores, .. 
} = ParaInclusion::process_candidates( &allowed_relay_parents, - vec![(backed_a, chain_a_assignment.1)], + &vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])] + .into_iter() + .collect::>(), &group_validators, false, ) @@ -2488,11 +2846,10 @@ fn para_upgrade_delay_scheduled_from_inclusion() { expected_bits(), ); - let v = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + let v = process_bitfields(checked_bitfields); assert_eq!(vec![(CoreIndex(0), candidate_a.hash())], v); - assert!(>::get(&chain_a).is_none()); - assert!(>::get(&chain_a).is_none()); + assert!(>::get(&chain_a).unwrap().is_empty()); let active_vote_state = paras::Pallet::::active_vote_state(&new_validation_code_hash) .expect("prechecking must be initiated"); @@ -2500,7 +2857,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { let cause = &active_vote_state.causes()[0]; // Upgrade block is the block of inclusion, not candidate's parent. assert_matches!(cause, - paras::PvfCheckCause::Upgrade { id, included_at, set_go_ahead: SetGoAhead::Yes } + paras::PvfCheckCause::Upgrade { id, included_at, upgrade_strategy: UpgradeStrategy::SetGoAheadSignal } if id == &chain_a && included_at == &7 ); }); diff --git a/polkadot/runtime/parachains/src/lib.rs b/polkadot/runtime/parachains/src/lib.rs index 36eec108b689..dba2b9ac8d41 100644 --- a/polkadot/runtime/parachains/src/lib.rs +++ b/polkadot/runtime/parachains/src/lib.rs @@ -56,7 +56,7 @@ mod mock; mod ump_tests; pub use origin::{ensure_parachain, Origin}; -pub use paras::{ParaLifecycle, SetGoAhead}; +pub use paras::{ParaLifecycle, UpgradeStrategy}; use primitives::{HeadData, Id as ParaId, ValidationCode}; use sp_runtime::{DispatchResult, FixedU128}; @@ -106,7 +106,7 @@ pub fn schedule_parachain_downgrade(id: ParaId) -> Result<(), pub fn schedule_code_upgrade( id: ParaId, new_code: ValidationCode, - set_go_ahead: SetGoAhead, + set_go_ahead: UpgradeStrategy, ) -> DispatchResult { paras::Pallet::::schedule_code_upgrade_external(id, new_code, set_go_ahead) } diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index c18770423673..9ba9b2c77a29 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -365,6 +365,7 @@ impl pallet_message_queue::Config for Test { type HeapSize = ConstU32<65536>; type MaxStale = ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); } parameter_types! { diff --git a/polkadot/runtime/parachains/src/paras/benchmarking.rs b/polkadot/runtime/parachains/src/paras/benchmarking.rs index 554f0c15af24..56e6ff153c15 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking.rs @@ -129,7 +129,7 @@ benchmarks! 
{ ValidationCode(vec![0]), expired, &config, - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); }: _(RawOrigin::Root, para_id, new_head) verify { diff --git a/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs b/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs index 53ccc35c477c..0bf5aa86c40f 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs @@ -177,7 +177,7 @@ where validation_code, /* relay_parent_number */ 1u32.into(), &configuration::Pallet::::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); } else { let r = Pallet::::schedule_para_initialize( diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index 594ba7f70fca..1267e81af9f6 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -387,16 +387,32 @@ pub(crate) enum PvfCheckCause { /// /// See https://github.com/paritytech/polkadot/issues/4601 for detailed explanation. included_at: BlockNumber, - /// Whether or not the given para should be sent the `GoAhead` signal. - set_go_ahead: SetGoAhead, + /// Whether or not the upgrade should be enacted directly. + /// + /// If set to `ApplyAtExpectedBlock` it means that no `GoAhead` signal will be set and the + /// parachain code will be overwritten directly. + upgrade_strategy: UpgradeStrategy, }, } -/// Should the `GoAhead` signal be set after a successful check of the new wasm binary? +/// The strategy on how to handle a validation code upgrade. +/// +/// When scheduling a parachain code upgrade, the upgrade is first checked by all validators. The +/// validators ensure that the new validation code can be compiled and instantiated. After the +/// majority of the validators have reported their checking result, the upgrade is either scheduled +/// or aborted. This strategy then comes into play at the relay chain block at which the upgrade is +/// scheduled to be applied. #[derive(Debug, Copy, Clone, PartialEq, TypeInfo, Decode, Encode)] -pub enum SetGoAhead { - Yes, - No, +pub enum UpgradeStrategy { + /// Set the `GoAhead` signal to inform the parachain that it is time to upgrade. + /// + /// The upgrade will then be applied after the first parachain block that observed the + /// `GoAhead` signal is enacted. + SetGoAheadSignal, + /// Apply the upgrade directly at the expected relay chain block. + /// + /// This doesn't wait for the parachain to make any kind of progress. + ApplyAtExpectedBlock, } impl PvfCheckCause { @@ -759,7 +775,8 @@ pub mod pallet { pub(super) type PastCodePruning = StorageValue<_, Vec<(ParaId, BlockNumberFor)>, ValueQuery>; - /// The block number at which the planned code change is expected for a para. + /// The block number at which the planned code change is expected for a parachain. + /// /// The change will be applied after the first parablock for this ID included which executes /// in the context of a relay chain block with a number >= `expected_at`. #[pallet::storage] @@ -767,6 +784,18 @@ pub mod pallet { pub(super) type FutureCodeUpgrades = StorageMap<_, Twox64Concat, ParaId, BlockNumberFor>; + /// The list of upcoming future code upgrades. + /// + /// Each item is a pair of the parachain and the expected block at which the upgrade should be + /// applied. The upgrade will be applied at the given relay chain block.
In contrast to + /// [`FutureCodeUpgrades`] this code upgrade will be applied regardless of whether the parachain + /// makes any progress or not. + /// + /// Ordered ascending by block number. + #[pallet::storage] + pub(super) type FutureCodeUpgradesAt = + StorageValue<_, Vec<(ParaId, BlockNumberFor)>, ValueQuery>; + /// The actual future code hash of a para. /// /// Corresponding code can be retrieved with [`CodeByHash`]. @@ -810,8 +839,10 @@ pub mod pallet { pub(super) type UpgradeCooldowns = StorageValue<_, Vec<(ParaId, BlockNumberFor)>, ValueQuery>; - /// The list of upcoming code upgrades. Each item is a pair of which para performs a code - /// upgrade and at which relay-chain block it is expected at. + /// The list of upcoming code upgrades. + /// + /// Each item is a pair of which para performs a code upgrade and the relay-chain block at + /// which it is expected. /// /// Ordered ascending by block number. #[pallet::storage] @@ -881,21 +912,9 @@ pub mod pallet { new_code: ValidationCode, ) -> DispatchResult { ensure_root(origin)?; - let maybe_prior_code_hash = CurrentCodeHash::::get(&para); let new_code_hash = new_code.hash(); Self::increase_code_ref(&new_code_hash, &new_code); - CurrentCodeHash::::insert(&para, new_code_hash); - - let now = frame_system::Pallet::::block_number(); - if let Some(prior_code_hash) = maybe_prior_code_hash { - Self::note_past_code(para, now, now, prior_code_hash); - } else { - log::error!( - target: LOG_TARGET, - "Pallet paras storage is inconsistent, prior code not found {:?}", - &para - ); - } + Self::set_current_code(para, new_code_hash, frame_system::Pallet::::block_number()); Self::deposit_event(Event::CurrentCodeUpdated(para)); Ok(()) } @@ -929,7 +948,7 @@ pub mod pallet { new_code, relay_parent_number, &config, - SetGoAhead::No, + UpgradeStrategy::ApplyAtExpectedBlock, ); Self::deposit_event(Event::CodeUpgradeScheduled(para)); Ok(()) } @@ -1228,7 +1247,7 @@ impl Pallet { pub(crate) fn schedule_code_upgrade_external( id: ParaId, new_code: ValidationCode, - set_go_ahead: SetGoAhead, + upgrade_strategy: UpgradeStrategy, ) -> DispatchResult { // Check that we can schedule an upgrade at all. ensure!(Self::can_upgrade_validation_code(id), Error::::CannotUpgradeCode); @@ -1240,7 +1259,7 @@ impl Pallet { let current_block = frame_system::Pallet::::block_number(); // Schedule the upgrade with a delay just like if a parachain triggered the upgrade. let upgrade_block = current_block.saturating_add(config.validation_upgrade_delay); - Self::schedule_code_upgrade(id, new_code, upgrade_block, &config, set_go_ahead); + Self::schedule_code_upgrade(id, new_code, upgrade_block, &config, upgrade_strategy); Self::deposit_event(Event::CodeUpgradeScheduled(id)); Ok(()) } @@ -1253,8 +1272,9 @@ impl Pallet { /// Called by the initializer to initialize the paras pallet. pub(crate) fn initializer_initialize(now: BlockNumberFor) -> Weight { - let weight = Self::prune_old_code(now); - weight + Self::process_scheduled_upgrade_changes(now) + Self::prune_old_code(now) + + Self::process_scheduled_upgrade_changes(now) + + Self::process_future_code_upgrades_at(now) } /// Called by the initializer to finalize the paras pallet. @@ -1356,16 +1376,13 @@ impl Pallet { // NOTE both of those iterates over the list and the outgoing. We do not expect either // of these to be large. Thus should be fine.
UpcomingUpgrades::::mutate(|upcoming_upgrades| { - *upcoming_upgrades = mem::take(upcoming_upgrades) - .into_iter() - .filter(|(para, _)| !outgoing.contains(para)) - .collect(); + upcoming_upgrades.retain(|(para, _)| !outgoing.contains(para)); }); UpgradeCooldowns::::mutate(|upgrade_cooldowns| { - *upgrade_cooldowns = mem::take(upgrade_cooldowns) - .into_iter() - .filter(|(para, _)| !outgoing.contains(para)) - .collect(); + upgrade_cooldowns.retain(|(para, _)| !outgoing.contains(para)); + }); + FutureCodeUpgradesAt::::mutate(|future_upgrades| { + future_upgrades.retain(|(para, _)| !outgoing.contains(para)); }); } @@ -1461,6 +1478,37 @@ impl Pallet { T::DbWeight::get().reads_writes(1 + pruning_tasks_done, 2 * pruning_tasks_done) } + /// Process the future code upgrades that should be applied directly. + /// + /// Upgrades that should not be applied directly are processed in + /// [`Self::process_scheduled_upgrade_changes`]. + fn process_future_code_upgrades_at(now: BlockNumberFor) -> Weight { + // Account for the weight of `FutureCodeUpgradesAt::mutate`. + let mut weight = T::DbWeight::get().reads_writes(1, 1); + FutureCodeUpgradesAt::::mutate( + |upcoming_upgrades: &mut Vec<(ParaId, BlockNumberFor)>| { + let num = upcoming_upgrades.iter().take_while(|&(_, at)| at <= &now).count(); + for (id, expected_at) in upcoming_upgrades.drain(..num) { + weight += T::DbWeight::get().reads_writes(1, 1); + + // This should always be `Some`, since a code upgrade is scheduled. + let new_code_hash = if let Some(new_code_hash) = FutureCodeHash::::take(&id) + { + new_code_hash + } else { + log::error!(target: LOG_TARGET, "Missing future code hash for {:?}", &id); + continue + }; + + weight += Self::set_current_code(id, new_code_hash, expected_at); + } + num + }, + ); + + weight + } + /// Process the timers related to upgrades. Specifically, the upgrade go ahead signals toggle /// and the upgrade cooldown restrictions. However, this function does not actually unset /// the upgrade restriction, that will happen in the `initializer_finalize` function. However, @@ -1581,14 +1629,14 @@ impl Pallet { PvfCheckCause::Onboarding(id) => { weight += Self::proceed_with_onboarding(*id, sessions_observed); }, - PvfCheckCause::Upgrade { id, included_at, set_go_ahead } => { + PvfCheckCause::Upgrade { id, included_at, upgrade_strategy } => { weight += Self::proceed_with_upgrade( *id, code_hash, now, *included_at, cfg, - *set_go_ahead, + *upgrade_strategy, ); }, } @@ -1622,38 +1670,50 @@ impl Pallet { now: BlockNumberFor, relay_parent_number: BlockNumberFor, cfg: &configuration::HostConfiguration>, - set_go_ahead: SetGoAhead, + upgrade_strategy: UpgradeStrategy, ) -> Weight { let mut weight = Weight::zero(); - // Compute the relay-chain block number starting at which the code upgrade is ready to be - // applied. + // Compute the relay-chain block number starting at which the code upgrade is ready to + // be applied. // - // The first parablock that has a relay-parent higher or at the same height of `expected_at` - // will trigger the code upgrade. The parablock that comes after that will be validated - // against the new validation code. + // The first parablock that has a relay-parent higher or at the same height of + // `expected_at` will trigger the code upgrade. The parablock that comes after that will + // be validated against the new validation code.
// - // Here we are trying to choose the block number that will have `validation_upgrade_delay` - // blocks from the relay-parent of inclusion of the the block that scheduled code upgrade - // but no less than `minimum_validation_upgrade_delay`. We want this delay out of caution - // so that when the last vote for pre-checking comes the parachain will have some time until - // the upgrade finally takes place. + // Here we are trying to choose the block number that will have + // `validation_upgrade_delay` blocks from the relay-parent of inclusion of the block + // that scheduled the code upgrade, but no less than `minimum_validation_upgrade_delay`. We + // want this delay out of caution so that when the last vote for pre-checking comes the + // parachain will have some time until the upgrade finally takes place. let expected_at = cmp::max( relay_parent_number + cfg.validation_upgrade_delay, now + cfg.minimum_validation_upgrade_delay, ); - weight += T::DbWeight::get().reads_writes(1, 4); - FutureCodeUpgrades::::insert(&id, expected_at); + match upgrade_strategy { + UpgradeStrategy::ApplyAtExpectedBlock => { + FutureCodeUpgradesAt::::mutate(|future_upgrades| { + let insert_idx = future_upgrades + .binary_search_by_key(&expected_at, |&(_, b)| b) + .unwrap_or_else(|idx| idx); + future_upgrades.insert(insert_idx, (id, expected_at)); + }); - // Only set an upcoming upgrade if `GoAhead` signal should be set for the respective para. - if set_go_ahead == SetGoAhead::Yes { - UpcomingUpgrades::::mutate(|upcoming_upgrades| { - let insert_idx = upcoming_upgrades - .binary_search_by_key(&expected_at, |&(_, b)| b) - .unwrap_or_else(|idx| idx); - upcoming_upgrades.insert(insert_idx, (id, expected_at)); - }); + weight += T::DbWeight::get().reads_writes(0, 2); + }, + UpgradeStrategy::SetGoAheadSignal => { + FutureCodeUpgrades::::insert(&id, expected_at); + + UpcomingUpgrades::::mutate(|upcoming_upgrades| { + let insert_idx = upcoming_upgrades + .binary_search_by_key(&expected_at, |&(_, b)| b) + .unwrap_or_else(|idx| idx); + upcoming_upgrades.insert(insert_idx, (id, expected_at)); + }); + + weight += T::DbWeight::get().reads_writes(1, 3); + }, } let expected_at = expected_at.saturated_into(); @@ -1757,7 +1817,7 @@ impl Pallet { // // - Empty value is treated as the current code is already inserted during the onboarding. // - // This is only an intermediate solution and should be fixed in foreseable future. + // This is only an intermediate solution and should be fixed in the foreseeable future.
// // [soaking issue]: https://github.com/paritytech/polkadot/issues/3918 let validation_code = @@ -1893,7 +1953,7 @@ impl Pallet { new_code: ValidationCode, inclusion_block_number: BlockNumberFor, cfg: &configuration::HostConfiguration>, - set_go_ahead: SetGoAhead, + upgrade_strategy: UpgradeStrategy, ) -> Weight { let mut weight = T::DbWeight::get().reads(1); @@ -1950,7 +2010,7 @@ impl Pallet { }); weight += Self::kick_off_pvf_check( - PvfCheckCause::Upgrade { id, included_at: inclusion_block_number, set_go_ahead }, + PvfCheckCause::Upgrade { id, included_at: inclusion_block_number, upgrade_strategy }, code_hash, new_code, cfg, @@ -2062,24 +2122,10 @@ impl Pallet { log::error!(target: LOG_TARGET, "Missing future code hash for {:?}", &id); return T::DbWeight::get().reads_writes(3, 1 + 3) }; - let maybe_prior_code_hash = CurrentCodeHash::::get(&id); - CurrentCodeHash::::insert(&id, &new_code_hash); - let log = ConsensusLog::ParaUpgradeCode(id, new_code_hash); - >::deposit_log(log.into()); + let weight = Self::set_current_code(id, new_code_hash, expected_at); - // `now` is only used for registering pruning as part of `fn note_past_code` - let now = >::block_number(); - - let weight = if let Some(prior_code_hash) = maybe_prior_code_hash { - Self::note_past_code(id, expected_at, now, prior_code_hash) - } else { - log::error!(target: LOG_TARGET, "Missing prior code hash for para {:?}", &id); - Weight::zero() - }; - - // add 1 to writes due to heads update. - weight + T::DbWeight::get().reads_writes(3, 1 + 3) + weight + T::DbWeight::get().reads_writes(3, 3) } else { T::DbWeight::get().reads_writes(1, 1 + 0) } @@ -2095,6 +2141,34 @@ impl Pallet { weight.saturating_add(T::OnNewHead::on_new_head(id, &new_head)) } + /// Set the current code for the given parachain. + // `at` for para-triggered replacement is the block number of the relay-chain + // block in whose context the parablock was executed + // (i.e. number of `relay_parent` in the receipt) + pub(crate) fn set_current_code( + id: ParaId, + new_code_hash: ValidationCodeHash, + at: BlockNumberFor, + ) -> Weight { + let maybe_prior_code_hash = CurrentCodeHash::::get(&id); + CurrentCodeHash::::insert(&id, &new_code_hash); + + let log = ConsensusLog::ParaUpgradeCode(id, new_code_hash); + >::deposit_log(log.into()); + + // `now` is only used for registering pruning as part of `fn note_past_code` + let now = >::block_number(); + + let weight = if let Some(prior_code_hash) = maybe_prior_code_hash { + Self::note_past_code(id, at, now, prior_code_hash) + } else { + log::error!(target: LOG_TARGET, "Missing prior code hash for para {:?}", &id); + Weight::zero() + }; + + weight + T::DbWeight::get().writes(1) + } + /// Returns the list of PVFs (aka validation code) that require casting a vote by a validator in /// the active validator set. pub(crate) fn pvfs_require_precheck() -> Vec { diff --git a/polkadot/runtime/parachains/src/paras/tests.rs b/polkadot/runtime/parachains/src/paras/tests.rs index 262ec9d3fdba..ad75166271e3 100644 --- a/polkadot/runtime/parachains/src/paras/tests.rs +++ b/polkadot/runtime/parachains/src/paras/tests.rs @@ -451,7 +451,7 @@ fn code_upgrade_applied_after_delay() { new_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); // Include votes for super-majority. 
submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -521,7 +521,7 @@ } #[test] -fn code_upgrade_applied_without_setting_go_ahead_signal() { +fn upgrade_strategy_apply_at_expected_block_works() { let code_retention_period = 10; let validation_upgrade_delay = 5; let validation_upgrade_cooldown = 10; @@ -560,77 +560,42 @@ run_to_block(2, Some(vec![1])); assert_eq!(Paras::current_code(&para_id), Some(original_code.clone())); - let (expected_at, next_possible_upgrade_at) = { - // this parablock is in the context of block 1. - let expected_at = 1 + validation_upgrade_delay; - let next_possible_upgrade_at = 1 + validation_upgrade_cooldown; - // `set_go_ahead` parameter set to `false` which prevents signaling the parachain - // with the `GoAhead` signal. - Paras::schedule_code_upgrade( - para_id, - new_code.clone(), - 1, - &Configuration::config(), - SetGoAhead::No, - ); - // Include votes for super-majority. - submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); - - Paras::note_new_head(para_id, Default::default(), 1); - - assert!(Paras::past_code_meta(&para_id).most_recent_change().is_none()); - assert_eq!(FutureCodeUpgrades::::get(&para_id), Some(expected_at)); - assert_eq!(FutureCodeHash::::get(&para_id), Some(new_code.hash())); - assert_eq!(UpcomingUpgrades::::get(), vec![]); - assert_eq!(UpgradeCooldowns::::get(), vec![(para_id, next_possible_upgrade_at)]); - assert_eq!(Paras::current_code(&para_id), Some(original_code.clone())); - check_code_is_stored(&original_code); - check_code_is_stored(&new_code); - - (expected_at, next_possible_upgrade_at) - }; + // this parablock is in the context of block 1. + let expected_at = 1 + validation_upgrade_delay; + let next_possible_upgrade_at = 1 + validation_upgrade_cooldown; + // The `ApplyAtExpectedBlock` strategy prevents signaling the parachain with the + // `GoAhead` signal. + Paras::schedule_code_upgrade( + para_id, + new_code.clone(), + 1, + &Configuration::config(), + UpgradeStrategy::ApplyAtExpectedBlock, ); + // Include votes for super-majority. + submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); + assert!(FutureCodeUpgradesAt::::get().iter().any(|(id, _)| *id == para_id)); + // Going to the expected block triggers the upgrade directly. run_to_block(expected_at, None); - // the candidate is in the context of the parent of `expected_at`, - // thus does not trigger the code upgrade. However, now the `UpgradeGoAheadSignal` - // should not be set. - { - Paras::note_new_head(para_id, Default::default(), expected_at - 1); - - assert!(Paras::past_code_meta(&para_id).most_recent_change().is_none()); - assert_eq!(FutureCodeUpgrades::::get(&para_id), Some(expected_at)); - assert_eq!(FutureCodeHash::::get(&para_id), Some(new_code.hash())); - assert!(UpgradeGoAheadSignal::::get(&para_id).is_none()); - assert_eq!(Paras::current_code(&para_id), Some(original_code.clone())); - check_code_is_stored(&original_code); - check_code_is_stored(&new_code); - } - - run_to_block(expected_at + 1, None); - - // the candidate is in the context of `expected_at`, and triggers - // the upgrade. - { - Paras::note_new_head(para_id, Default::default(), expected_at); + // Reporting a head doesn't change anything.
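+ // The upgrade was already enacted by the initializer at `expected_at`, without any parachain block being produced.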
+ Paras::note_new_head(para_id, Default::default(), expected_at - 1); - assert_eq!(Paras::past_code_meta(¶_id).most_recent_change(), Some(expected_at)); - assert_eq!( - PastCodeHash::::get(&(para_id, expected_at)), - Some(original_code.hash()), - ); - assert!(FutureCodeUpgrades::::get(¶_id).is_none()); - assert!(FutureCodeHash::::get(¶_id).is_none()); - assert!(UpgradeGoAheadSignal::::get(¶_id).is_none()); - assert_eq!(Paras::current_code(¶_id), Some(new_code.clone())); - assert_eq!( - UpgradeRestrictionSignal::::get(¶_id), - Some(UpgradeRestriction::Present), - ); - assert_eq!(UpgradeCooldowns::::get(), vec![(para_id, next_possible_upgrade_at)]); - check_code_is_stored(&original_code); - check_code_is_stored(&new_code); - } + assert_eq!(Paras::past_code_meta(¶_id).most_recent_change(), Some(expected_at)); + assert_eq!(PastCodeHash::::get(&(para_id, expected_at)), Some(original_code.hash())); + assert!(FutureCodeUpgrades::::get(¶_id).is_none()); + assert!(FutureCodeUpgradesAt::::get().iter().all(|(id, _)| *id != para_id)); + assert!(FutureCodeHash::::get(¶_id).is_none()); + assert!(UpgradeGoAheadSignal::::get(¶_id).is_none()); + assert_eq!(Paras::current_code(¶_id), Some(new_code.clone())); + assert_eq!( + UpgradeRestrictionSignal::::get(¶_id), + Some(UpgradeRestriction::Present), + ); + assert_eq!(UpgradeCooldowns::::get(), vec![(para_id, next_possible_upgrade_at)]); + check_code_is_stored(&original_code); + check_code_is_stored(&new_code); run_to_block(next_possible_upgrade_at + 1, None); @@ -688,7 +653,7 @@ fn code_upgrade_applied_after_delay_even_when_late() { new_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -772,7 +737,7 @@ fn submit_code_change_when_not_allowed_is_err() { new_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -790,7 +755,7 @@ fn submit_code_change_when_not_allowed_is_err() { newer_code.clone(), 2, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); assert_eq!( FutureCodeUpgrades::::get(¶_id), @@ -854,7 +819,7 @@ fn upgrade_restriction_elapsed_doesnt_mean_can_upgrade() { new_code.clone(), 0, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -879,7 +844,7 @@ fn upgrade_restriction_elapsed_doesnt_mean_can_upgrade() { newer_code.clone(), 30, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); assert_eq!(FutureCodeUpgrades::::get(¶_id), Some(0 + validation_upgrade_delay)); }); @@ -940,7 +905,7 @@ fn full_parachain_cleanup_storage() { new_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); // Include votes for super-majority. 
submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -1036,7 +1001,7 @@ fn cannot_offboard_ongoing_pvf_check() { new_code.clone(), RELAY_PARENT, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); assert!(!Paras::pvfs_require_precheck().is_empty()); @@ -1194,7 +1159,7 @@ fn code_hash_at_returns_up_to_end_of_code_retention_period() { new_code.clone(), 0, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -1210,7 +1175,7 @@ fn code_hash_at_returns_up_to_end_of_code_retention_period() { assert_eq!(Paras::past_code_meta(¶_id).upgrade_times, vec![upgrade_at(4, 10)]); assert_eq!(Paras::current_code(¶_id), Some(new_code.clone())); - // Make sure that the old code is available **before** the code retion period passes. + // Make sure that the old code is available **before** the code retention period passes. run_to_block(10 + code_retention_period, None); assert_eq!(Paras::code_by_hash(&old_code.hash()), Some(old_code.clone())); assert_eq!(Paras::code_by_hash(&new_code.hash()), Some(new_code.clone())); @@ -1303,7 +1268,7 @@ fn pvf_check_coalescing_onboarding_and_upgrade() { validation_code.clone(), RELAY_PARENT, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); assert!(!Paras::pvfs_require_precheck().is_empty()); @@ -1413,7 +1378,7 @@ fn pvf_check_upgrade_reject() { new_code.clone(), RELAY_PARENT, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); check_code_is_stored(&new_code); @@ -1599,7 +1564,7 @@ fn include_pvf_check_statement_refunds_weight() { new_code.clone(), RELAY_PARENT, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); let mut stmts = IntoIterator::into_iter([0, 1, 2, 3]) @@ -1700,7 +1665,7 @@ fn poke_unused_validation_code_doesnt_remove_code_with_users() { validation_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); Paras::note_new_head(para_id, HeadData::default(), 1); @@ -1717,7 +1682,7 @@ fn poke_unused_validation_code_doesnt_remove_code_with_users() { #[test] fn increase_code_ref_doesnt_have_allergy_on_add_trusted_validation_code() { - // Verify that accidential calling of increase_code_ref or decrease_code_ref does not lead + // Verify that accidental calling of increase_code_ref or decrease_code_ref does not lead // to a disaster. // NOTE that this test is extra paranoid, as it is not really possible to hit // `decrease_code_ref` without calling `increase_code_ref` first. 
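The mechanical replacement of `SetGoAhead::Yes` with `UpgradeStrategy::SetGoAheadSignal` across these tests, together with the `ApplyAtExpectedBlock` case exercised earlier, indicates the shape of the new type. A minimal sketch, assuming only the two variants that appear in this diff (the doc comments are illustrative, not the authoritative definition):

/// How a para's validation code upgrade should be enacted once the PVF
/// pre-check has passed. Sketch inferred from the call sites in this diff.
pub enum UpgradeStrategy {
    /// Set the `GoAhead` signal and wait for the parachain to acknowledge the
    /// upgrade via a new head before the code is switched (the behaviour
    /// previously selected with `SetGoAhead::Yes`).
    SetGoAheadSignal,
    /// Enact the upgrade directly at the expected block, without signaling the
    /// parachain (previously `SetGoAhead::No`).
    ApplyAtExpectedBlock,
}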
@@ -1771,7 +1736,7 @@ fn add_trusted_validation_code_insta_approval() { validation_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); Paras::note_new_head(para_id, HeadData::default(), 1); @@ -1813,7 +1778,7 @@ fn add_trusted_validation_code_enacts_existing_pvf_vote() { validation_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); Paras::note_new_head(para_id, HeadData::default(), 1); diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index a2a9f9a71f79..8416d3a429fe 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -21,7 +21,7 @@ use core::cmp::min; use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; use frame_system::RawOrigin; -use primitives::v6::GroupIndex; +use primitives::v7::GroupIndex; use crate::builder::BenchBuilder; @@ -146,10 +146,6 @@ benchmarks! { assert_eq!(backing_validators.1.len(), votes); } - assert_eq!( - inclusion::PendingAvailabilityCommitments::::iter().count(), - cores_with_backed.len() - ); assert_eq!( inclusion::PendingAvailability::::iter().count(), cores_with_backed.len() @@ -210,10 +206,6 @@ benchmarks! { ); } - assert_eq!( - inclusion::PendingAvailabilityCommitments::::iter().count(), - cores_with_backed.len() - ); assert_eq!( inclusion::PendingAvailability::::iter().count(), cores_with_backed.len() diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index e8360d06f246..2329b1f0a351 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -24,8 +24,7 @@ use crate::{ configuration, disputes::DisputesHandler, - inclusion, - inclusion::CandidateCheckContext, + inclusion::{self, CandidateCheckContext}, initializer, metrics::METRICS, paras, @@ -35,6 +34,7 @@ use crate::{ }; use bitvec::prelude::BitVec; use frame_support::{ + defensive, dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, inherent::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}, pallet_prelude::*, @@ -43,9 +43,9 @@ use frame_support::{ use frame_system::pallet_prelude::*; use pallet_babe::{self, ParentBlockRandomness}; use primitives::{ - effective_minimum_backing_votes, vstaging::node_features::FeatureIndex, BackedCandidate, - CandidateHash, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, - CoreIndex, DisputeStatementSet, InherentData as ParachainsInherentData, + effective_minimum_backing_votes, node_features::FeatureIndex, BackedCandidate, CandidateHash, + CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CoreIndex, + DisputeStatementSet, HeadData, InherentData as ParachainsInherentData, MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, SignedAvailabilityBitfields, SigningContext, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, ValidatorId, ValidatorIndex, ValidityAttestation, PARACHAINS_INHERENT_IDENTIFIER, @@ -134,18 +134,11 @@ pub mod pallet { /// The hash of the submitted parent header doesn't correspond to the saved block hash of /// the parent. InvalidParentHeader, - /// Disputed candidate that was concluded invalid. - CandidateConcludedInvalid, /// The data given to the inherent will result in an overweight block. 
InherentOverweight, - /// The ordering of dispute statements was invalid. - DisputeStatementsUnsortedOrDuplicates, - /// A dispute statement was invalid. - DisputeInvalid, - /// A candidate was backed by a disabled validator - BackedByDisabled, - /// A candidate was backed even though the paraid was not scheduled. - BackedOnUnscheduledCore, + /// A candidate was filtered during inherent execution. This should have only been done + /// during creation. + CandidatesFilteredDuringExecution, /// Too many candidates supplied. UnscheduledCandidate, } @@ -235,35 +228,6 @@ pub mod pallet { } } - /// Collect all freed cores based on storage data. (i.e. append cores freed from timeouts to - /// the given `freed_concluded`). - /// - /// The parameter `freed_concluded` contains all core indicies that became - /// free due to candidate that became available. - pub(crate) fn collect_all_freed_cores( - freed_concluded: I, - ) -> BTreeMap - where - I: core::iter::IntoIterator, - T: Config, - { - // Handle timeouts for any availability core work. - let freed_timeout = if >::availability_timeout_check_required() { - let pred = >::availability_timeout_predicate(); - >::collect_pending(pred) - } else { - Vec::new() - }; - - // Schedule paras again, given freed cores, and reasons for freeing. - let freed = freed_concluded - .into_iter() - .map(|(c, _hash)| (c, FreedReason::Concluded)) - .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) - .collect::>(); - freed - } - #[pallet::call] impl Pallet { /// Enter the paras inherent. This will process bitfields and backed candidates. @@ -319,7 +283,7 @@ impl Pallet { /// Process inherent data. /// /// The given inherent data is processed and state is altered accordingly. If any data could - /// not be applied (inconsitencies, weight limit, ...) it is removed. + /// not be applied (inconsistencies, weight limit, ...) it is removed. /// /// When called from `create_inherent` the `context` must be set to /// `ProcessInherentDataContext::ProvideInherent` so it guarantees the invariant that inherent @@ -526,7 +490,7 @@ impl Pallet { // Contains the disputes that are concluded in the current session only, // since these are the only ones that are relevant for the occupied cores - // and lightens the load on `collect_disputed` significantly. + // and lightens the load on `free_disputed` significantly. // Cores can't be occupied with candidates of the previous sessions, and only // things with new votes can have just concluded. We only need to collect // cores with disputes that conclude just now, because disputes that @@ -542,21 +506,17 @@ impl Pallet { .map(|(_session, candidate)| candidate) .collect::>(); - let freed_disputed: BTreeMap = - >::collect_disputed(¤t_concluded_invalid_disputes) + // Get the cores freed as a result of concluded invalid candidates. + let (freed_disputed, concluded_invalid_hashes): (Vec, BTreeSet) = + >::free_disputed(¤t_concluded_invalid_disputes) .into_iter() - .map(|core| (core, FreedReason::Concluded)) - .collect(); + .unzip(); // Create a bit index from the set of core indices where each index corresponds to // a core index that was freed due to a dispute. // // I.e. 010100 would indicate, the candidates on Core 1 and 3 would be disputed. 
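// A hedged, standalone sketch of the construction just described, assuming
// `create_disputed_bitfield` simply sets one bit per core freed by a dispute.
// Plain `u32` core indices replace `CoreIndex`; illustrative only:
use bitvec::prelude::*;

fn disputed_bits_sketch(expected_bits: usize, freed: impl Iterator<Item = u32>) -> BitVec<u8, Lsb0> {
    let mut bits = bitvec![u8, Lsb0; 0; expected_bits];
    for core in freed {
        // Indices beyond the expected bitfield length are ignored.
        if (core as usize) < expected_bits {
            // E.g. freed cores 1 and 3 yield `010100`, as in the comment above.
            bits.set(core as usize, true);
        }
    }
    bits
}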
- let disputed_bitfield = create_disputed_bitfield(expected_bits, freed_disputed.keys()); - - if !freed_disputed.is_empty() { - >::free_cores_and_fill_claimqueue(freed_disputed.clone(), now); - } + let disputed_bitfield = create_disputed_bitfield(expected_bits, freed_disputed.iter()); let bitfields = sanitize_bitfields::( bitfields, @@ -571,11 +531,9 @@ impl Pallet { // Process new availability bitfields, yielding any availability cores whose // work has now concluded. let freed_concluded = - >::update_pending_availability_and_get_freed_cores::<_>( - expected_bits, + >::update_pending_availability_and_get_freed_cores( &validator_public[..], bitfields.clone(), - >::core_para, ); // Inform the disputes module of all included candidates. @@ -585,8 +543,24 @@ impl Pallet { METRICS.on_candidates_included(freed_concluded.len() as u64); - let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); + // Get the timed out candidates + let freed_timeout = if >::availability_timeout_check_required() { + >::free_timedout() + } else { + Vec::new() + }; + + if !freed_timeout.is_empty() { + log::debug!(target: LOG_TARGET, "Evicted timed out cores: {:?}", freed_timeout); + } + // We'll schedule paras again, given freed cores, and reasons for freeing. + let freed = freed_concluded + .into_iter() + .map(|(c, _hash)| (c, FreedReason::Concluded)) + .chain(freed_disputed.into_iter().map(|core| (core, FreedReason::Concluded))) + .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) + .collect::>(); >::free_cores_and_fill_claimqueue(freed, now); METRICS.on_candidates_processed_total(backed_candidates.len() as u64); @@ -605,55 +579,28 @@ impl Pallet { scheduled.entry(para_id).or_default().insert(core_idx); } - let SanitizedBackedCandidates { - backed_candidates_with_core, - votes_from_disabled_were_dropped, - dropped_unscheduled_candidates, - } = sanitize_backed_candidates::( + let initial_candidate_count = backed_candidates.len(); + let backed_candidates_with_core = sanitize_backed_candidates::( backed_candidates, &allowed_relay_parents, - |candidate_idx: usize, - backed_candidate: &BackedCandidate<::Hash>| - -> bool { - let para_id = backed_candidate.descriptor().para_id; - let prev_context = >::para_most_recent_context(para_id); - let check_ctx = CandidateCheckContext::::new(prev_context); - - // never include a concluded-invalid candidate - current_concluded_invalid_disputes.contains(&backed_candidate.hash()) || - // Instead of checking the candidates with code upgrades twice - // move the checking up here and skip it in the training wheels fallback. - // That way we avoid possible duplicate checks while assuring all - // backed candidates fine to pass on. - // - // NOTE: this is the only place where we check the relay-parent. - check_ctx - .verify_backed_candidate(&allowed_relay_parents, candidate_idx, backed_candidate.candidate()) - .is_err() - }, + concluded_invalid_hashes, scheduled, core_index_enabled, ); + let count = count_backed_candidates(&backed_candidates_with_core); - ensure!( - backed_candidates_with_core.len() <= total_scheduled_cores, - Error::::UnscheduledCandidate - ); - - METRICS.on_candidates_sanitized(backed_candidates_with_core.len() as u64); + ensure!(count <= total_scheduled_cores, Error::::UnscheduledCandidate); - // In `Enter` context (invoked during execution) there should be no backing votes from - // disabled validators because they should have been filtered out during inherent data - // preparation (`ProvideInherent` context). Abort in such cases. 
- if context == ProcessInherentDataContext::Enter { - ensure!(!votes_from_disabled_were_dropped, Error::::BackedByDisabled); - } + METRICS.on_candidates_sanitized(count as u64); - // In `Enter` context (invoked during execution) we shouldn't have filtered any candidates - // due to a para not being scheduled. They have been filtered during inherent data - // preparation (`ProvideInherent` context). Abort in such cases. + // In `Enter` context (invoked during execution) no more candidates should be filtered, + // because they have already been filtered during `ProvideInherent` context. Abort in such + // cases. if context == ProcessInherentDataContext::Enter { - ensure!(!dropped_unscheduled_candidates, Error::::BackedOnUnscheduledCore); + ensure!( + initial_candidate_count == count, + Error::::CandidatesFilteredDuringExecution + ); } // Process backed candidates according to scheduled cores. @@ -662,7 +609,7 @@ impl Pallet { candidate_receipt_with_backing_validator_indices, } = >::process_candidates( &allowed_relay_parents, - backed_candidates_with_core.clone(), + &backed_candidates_with_core, >::group_validators, core_index_enabled, )?; @@ -683,10 +630,13 @@ impl Pallet { let processed = ParachainsInherentData { bitfields, - backed_candidates: backed_candidates_with_core - .into_iter() - .map(|(candidate, _)| candidate) - .collect(), + backed_candidates: backed_candidates_with_core.into_iter().fold( + Vec::with_capacity(count), + |mut acc, (_id, candidates)| { + acc.extend(candidates.into_iter().map(|(c, _)| c)); + acc + }, + ), disputes, parent_header, }; @@ -783,7 +733,7 @@ fn random_sel Weight>( /// are preferred. And for disputes, local and older disputes are preferred (see /// `limit_and_sanitize_disputes`). for backed candidates, since with a increasing number of /// parachains their chances of inclusion become slim. All backed candidates are checked -/// beforehands in `fn create_inherent_inner` which guarantees sanity. +/// beforehand in `fn create_inherent_inner` which guarantees sanity. /// /// Assumes disputes are already filtered by the time this is called. /// @@ -986,83 +936,86 @@ pub(crate) fn sanitize_bitfields( bitfields } -// Result from `sanitize_backed_candidates` -#[derive(Debug, PartialEq)] -struct SanitizedBackedCandidates { - // Sanitized backed candidates along with the assigned core. The `Vec` is sorted according to - // the occupied core index. - backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>, - // Set to true if any votes from disabled validators were dropped from the input. - votes_from_disabled_were_dropped: bool, - // Set to true if any candidates were dropped due to filtering done in - // `map_candidates_to_cores` - dropped_unscheduled_candidates: bool, -} - +/// Performs various filtering on the backed candidates inherent data. +/// Must maintain the invariant that the returned candidate collection contains the candidates +/// sorted in dependency order for each para. When doing any filtering, we must therefore drop any +/// subsequent candidates after the filtered one. +/// /// Filter out: -/// 1. any candidates that have a concluded invalid dispute -/// 2. any unscheduled candidates, as well as candidates whose paraid has multiple cores assigned +/// 1. any candidates which don't form a chain with the other candidates of the paraid (even if they +/// do form a chain but are not in the right order). +/// 2. any candidates that have a concluded invalid dispute or who are descendants of a concluded +/// invalid candidate. +/// 3. 
any unscheduled candidates, as well as candidates whose paraid has multiple cores assigned /// but have no injected core index. -/// 3. all backing votes from disabled validators -/// 4. any candidates that end up with less than `effective_minimum_backing_votes` backing votes +/// 4. all backing votes from disabled validators +/// 5. any candidates that end up with less than `effective_minimum_backing_votes` backing votes /// -/// `scheduled` follows the same naming scheme as provided in the -/// guide: Currently `free` but might become `occupied`. -/// For the filtering here the relevant part is only the current `free` -/// state. -/// -/// `candidate_has_concluded_invalid_dispute` must return `true` if the candidate -/// is disputed, false otherwise. The passed `usize` is the candidate index. -/// -/// Returns struct `SanitizedBackedCandidates` where `backed_candidates` are sorted according to the -/// occupied core index. -fn sanitize_backed_candidates< - T: crate::inclusion::Config, - F: FnMut(usize, &BackedCandidate) -> bool, ->( - mut backed_candidates: Vec>, +/// Returns the scheduled +/// backed candidates which passed filtering, mapped by para id and in the right dependency order. +fn sanitize_backed_candidates( + backed_candidates: Vec>, allowed_relay_parents: &AllowedRelayParentsTracker>, - mut candidate_has_concluded_invalid_dispute_or_is_invalid: F, + concluded_invalid_with_descendants: BTreeSet, scheduled: BTreeMap>, core_index_enabled: bool, -) -> SanitizedBackedCandidates { - // Remove any candidates that were concluded invalid. - // This does not assume sorting. - backed_candidates.indexed_retain(move |candidate_idx, backed_candidate| { - !candidate_has_concluded_invalid_dispute_or_is_invalid(candidate_idx, backed_candidate) +) -> BTreeMap, CoreIndex)>> { + // Map the candidates to the right paraids, while making sure that the order between candidates + // of the same para is preserved. + let mut candidates_per_para: BTreeMap> = BTreeMap::new(); + for candidate in backed_candidates { + candidates_per_para + .entry(candidate.descriptor().para_id) + .or_default() + .push(candidate); + } + + // Check that candidates pertaining to the same para form a chain. Drop the ones that + // don't, along with the rest of candidates which follow them in the input vector. + filter_unchained_candidates::(&mut candidates_per_para, allowed_relay_parents); + + // Remove any candidates that were concluded invalid or who are descendants of concluded invalid + // candidates (along with their descendants). + retain_candidates::(&mut candidates_per_para, |_, candidate| { + let keep = !concluded_invalid_with_descendants.contains(&candidate.candidate().hash()); + + if !keep { + log::debug!( + target: LOG_TARGET, + "Found backed candidate {:?} which was concluded invalid or is a descendant of a concluded invalid candidate, for paraid {:?}.", + candidate.candidate().hash(), + candidate.descriptor().para_id + ); + } + keep }); - let initial_candidate_count = backed_candidates.len(); - // Map candidates to scheduled cores. Filter out any unscheduled candidates. + // Map candidates to scheduled cores. Filter out any unscheduled candidates along with their + // descendants. 
let mut backed_candidates_with_core = map_candidates_to_cores::( &allowed_relay_parents, scheduled, core_index_enabled, - backed_candidates, + candidates_per_para, ); - let dropped_unscheduled_candidates = - initial_candidate_count != backed_candidates_with_core.len(); - - // Filter out backing statements from disabled validators - let votes_from_disabled_were_dropped = filter_backed_statements_from_disabled_validators::( + // Filter out backing statements from disabled validators. If this leaves a candidate with + // fewer backing votes than required, filter out that candidate as well. As with all the other + // filtering operations above, we also drop the descendants of any dropped candidate. + filter_backed_statements_from_disabled_validators::( &mut backed_candidates_with_core, &allowed_relay_parents, core_index_enabled, ); - // Sort the `Vec` last, once there is a guarantee that these - // `BackedCandidates` references the expected relay chain parent, - // but more importantly are scheduled for a free core. - // This both avoids extra work for obviously invalid candidates, - // but also allows this to be done in place. - backed_candidates_with_core.sort_by(|(_x, core_x), (_y, core_y)| core_x.cmp(&core_y)); - - SanitizedBackedCandidates { - dropped_unscheduled_candidates, - votes_from_disabled_were_dropped, - backed_candidates_with_core, - } + backed_candidates_with_core +} + +fn count_backed_candidates(backed_candidates: &BTreeMap>) -> usize { + backed_candidates.iter().fold(0, |mut count, (_id, candidates)| { + count += candidates.len(); + count + }) } /// Derive entropy from babe provided per block randomness. @@ -1117,7 +1070,7 @@ fn limit_and_sanitize_disputes< log::debug!(target: LOG_TARGET, "Above max consumable weight: {}/{}", disputes_weight, max_consumable_weight); let mut checked_acc = Vec::::with_capacity(disputes.len()); - // Accumualated weight of all disputes picked, that passed the checks. + // Accumulated weight of all disputes picked, that passed the checks. let mut weight_acc = Weight::zero(); // Select disputes in-order until the remaining weight is attained @@ -1146,48 +1099,82 @@ } -// Filters statements from disabled validators in `BackedCandidate`, non-scheduled candidates and -// few more sanity checks. Returns `true` if at least one statement is removed and `false` -// otherwise. -fn filter_backed_statements_from_disabled_validators( - backed_candidates_with_core: &mut Vec<( - BackedCandidate<::Hash>, - CoreIndex, - )>, +// Helper function for filtering candidates which don't pass the given predicate. Once the first +// candidate which fails the predicate is found, all the other candidates that follow are dropped. +fn retain_candidates< + T: inclusion::Config + paras::Config + inclusion::Config, + F: FnMut(ParaId, &mut C) -> bool, + C, +>( + candidates_per_para: &mut BTreeMap>, + mut pred: F, +) { + for (para_id, candidates) in candidates_per_para.iter_mut() { + let mut latest_valid_idx = None; + + for (idx, candidate) in candidates.iter_mut().enumerate() { + if pred(*para_id, candidate) { + // Found a valid candidate. + latest_valid_idx = Some(idx); + } else { + break + } + } + + if let Some(latest_valid_idx) = latest_valid_idx { + candidates.truncate(latest_valid_idx + 1); + } else { + candidates.clear(); + } + } + + candidates_per_para.retain(|_, c| !c.is_empty()); +} + +// Filters statements from disabled validators in `BackedCandidate` and does a few more sanity +// checks.
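// An illustration of the prefix-retention semantics of `retain_candidates`
// above, on which the function below relies: once one candidate fails the
// predicate, the remainder of that para's chain is dropped as well. A
// simplified, self-contained sketch (`u32` stands in for the real para and
// candidate types):
use std::collections::BTreeMap;

fn retain_prefix<C>(
    per_para: &mut BTreeMap<u32, Vec<C>>,
    mut pred: impl FnMut(u32, &mut C) -> bool,
) {
    for (para, candidates) in per_para.iter_mut() {
        // Find the first failing candidate; everything from it onwards goes.
        let mut keep = candidates.len();
        for (idx, candidate) in candidates.iter_mut().enumerate() {
            if !pred(*para, candidate) {
                keep = idx;
                break;
            }
        }
        candidates.truncate(keep);
    }
    // Paras whose whole chain was dropped disappear from the map entirely.
    per_para.retain(|_, c| !c.is_empty());
}

// With candidates `[10, 11, 12]` and a predicate rejecting `11`, only `[10]`
// survives: `12` is dropped even though it passes the predicate.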
+fn filter_backed_statements_from_disabled_validators< + T: shared::Config + scheduler::Config + inclusion::Config, +>( + backed_candidates_with_core: &mut BTreeMap< + ParaId, + Vec<(BackedCandidate<::Hash>, CoreIndex)>, + >, allowed_relay_parents: &AllowedRelayParentsTracker>, core_index_enabled: bool, -) -> bool { +) { let disabled_validators = BTreeSet::<_>::from_iter(shared::Pallet::::disabled_validators().into_iter()); if disabled_validators.is_empty() { // No disabled validators - nothing to do - return false + return } - let backed_len_before = backed_candidates_with_core.len(); - - // Flag which will be returned. Set to `true` if at least one vote is filtered. - let mut filtered = false; - let minimum_backing_votes = configuration::Pallet::::config().minimum_backing_votes; // Process all backed candidates. `validator_indices` in `BackedCandidates` are indices within // the validator group assigned to the parachain. To obtain this group we need: // 1. Core index assigned to the parachain which has produced the candidate // 2. The relay chain block number of the candidate - backed_candidates_with_core.retain_mut(|(bc, core_idx)| { - let (validator_indices, maybe_core_index) = bc.validator_indices_and_core_index(core_index_enabled); + retain_candidates::(backed_candidates_with_core, |para_id, (bc, core_idx)| { + let (validator_indices, maybe_core_index) = + bc.validator_indices_and_core_index(core_index_enabled); let mut validator_indices = BitVec::<_>::from(validator_indices); - // Get relay parent block number of the candidate. We need this to get the group index assigned to this core at this block number - let relay_parent_block_number = match allowed_relay_parents - .acquire_info(bc.descriptor().relay_parent, None) { + // Get relay parent block number of the candidate. We need this to get the group index + // assigned to this core at this block number + let relay_parent_block_number = + match allowed_relay_parents.acquire_info(bc.descriptor().relay_parent, None) { Some((_, block_num)) => block_num, None => { - log::debug!(target: LOG_TARGET, "Relay parent {:?} for candidate is not in the allowed relay parents. Dropping the candidate.", bc.descriptor().relay_parent); + log::debug!( + target: LOG_TARGET, + "Relay parent {:?} for candidate is not in the allowed relay parents. Dropping the candidate.", + bc.descriptor().relay_parent + ); return false - } + }, }; // Get the group index for the core @@ -1208,12 +1195,15 @@ fn filter_backed_statements_from_disabled_validators { log::debug!(target: LOG_TARGET, "Can't get the validators from group {:?}. Dropping the candidate.", group_idx); return false - } + }, }; // Bitmask with the disabled indices within the validator group - let disabled_indices = BitVec::::from_iter(validator_group.iter().map(|idx| disabled_validators.contains(idx))); - // The indices of statements from disabled validators in `BackedCandidate`. We have to drop these. + let disabled_indices = BitVec::::from_iter( + validator_group.iter().map(|idx| disabled_validators.contains(idx)), + ); + // The indices of statements from disabled validators in `BackedCandidate`. We have to drop + // these. 
let indices_to_drop = disabled_indices.clone() & &validator_indices; // Apply the bitmask to drop the disabled validator from `validator_indices` validator_indices &= !disabled_indices; @@ -1225,62 +1215,218 @@ fn filter_backed_statements_from_disabled_validators 0 { - filtered = true; - } - // By filtering votes we might render the candidate invalid and cause a failure in // [`process_candidates`]. To avoid this we have to perform a sanity check here. If there // are not enough backing votes after filtering we will remove the whole candidate. - if bc.validity_votes().len() < effective_minimum_backing_votes( - validator_group.len(), - minimum_backing_votes - ) { + if bc.validity_votes().len() < + effective_minimum_backing_votes(validator_group.len(), minimum_backing_votes) + { + log::debug!( + target: LOG_TARGET, + "Dropping candidate {:?} of paraid {:?} because it was left with too few backing votes after votes from disabled validators were filtered.", + bc.candidate().hash(), + para_id + ); + return false } true }); +} + +// Check that candidates pertaining to the same para form a chain. Drop the ones that +// don't, along with the rest of candidates which follow them in the input vector. +// In the process, duplicated candidates will also be dropped (even if they form a valid cycle; +// cycles are not allowed if they entail backing duplicated candidates). +fn filter_unchained_candidates( + candidates: &mut BTreeMap>>, + allowed_relay_parents: &AllowedRelayParentsTracker>, +) { + let mut para_latest_head_data: BTreeMap = BTreeMap::new(); + for para_id in candidates.keys() { + let latest_head_data = match >::para_latest_head_data(¶_id) { + None => { + defensive!("Latest included head data for paraid {:?} is None", para_id); + continue + }, + Some(latest_head_data) => latest_head_data, + }; + para_latest_head_data.insert(*para_id, latest_head_data); + } + + let mut para_visited_candidates: BTreeMap> = BTreeMap::new(); + + retain_candidates::(candidates, |para_id, candidate| { + let Some(latest_head_data) = para_latest_head_data.get(¶_id) else { return false }; + let candidate_hash = candidate.candidate().hash(); + + let visited_candidates = + para_visited_candidates.entry(para_id).or_insert_with(|| BTreeSet::new()); + if visited_candidates.contains(&candidate_hash) { + log::debug!( + target: LOG_TARGET, + "Found duplicate candidates for paraid {:?}. Dropping the candidates with hash {:?}", + para_id, + candidate_hash + ); + + // If we got a duplicate candidate, stop. + return false + } else { + visited_candidates.insert(candidate_hash); + } + + let prev_context = >::para_most_recent_context(para_id); + let check_ctx = CandidateCheckContext::::new(prev_context); + + let res = match check_ctx.verify_backed_candidate( + &allowed_relay_parents, + candidate.candidate(), + latest_head_data.clone(), + ) { + Ok(_) => true, + Err(err) => { + log::debug!( + target: LOG_TARGET, + "Backed candidate verification for candidate {:?} of paraid {:?} failed with {:?}", + candidate_hash, + para_id, + err + ); + false + }, + }; + + if res { + para_latest_head_data + .insert(para_id, candidate.candidate().commitments.head_data.clone()); + } - // Also return `true` if a whole candidate was dropped from the set - filtered || backed_len_before != backed_candidates_with_core.len() + res + }); } /// Map candidates to scheduled cores. 
-/// If the para only has one scheduled core and no `CoreIndex` is injected, map the candidate to the +/// If the para only has one scheduled core and one candidate supplied, map the candidate to the /// single core. If the para has multiple cores scheduled, only map the candidates which have a /// proper core injected. Filter out the rest. /// Also returns whether or not we dropped any candidates. +/// When dropping a candidate of a para, we must drop all subsequent candidates from that para +/// (because they form a chain). fn map_candidates_to_cores( allowed_relay_parents: &AllowedRelayParentsTracker>, mut scheduled: BTreeMap>, core_index_enabled: bool, - candidates: Vec>, -) -> Vec<(BackedCandidate, CoreIndex)> { - let mut backed_candidates_with_core = Vec::with_capacity(candidates.len()); - - // We keep a candidate if the parachain has only one core assigned or if - // a core index is provided by block author and it's indeed scheduled. - for backed_candidate in candidates { - let maybe_injected_core_index = get_injected_core_index::( - allowed_relay_parents, - &backed_candidate, - core_index_enabled, - ); + candidates: BTreeMap>>, +) -> BTreeMap, CoreIndex)>> { + let mut backed_candidates_with_core = BTreeMap::new(); + + for (para_id, backed_candidates) in candidates.into_iter() { + if backed_candidates.len() == 0 { + defensive!("Backed candidates for paraid {} is empty.", para_id); + continue + } - let scheduled_cores = scheduled.get_mut(&backed_candidate.descriptor().para_id); - // Candidates without scheduled cores are silently filtered out. + let scheduled_cores = scheduled.get_mut(¶_id); + + // ParaIds without scheduled cores are silently filtered out. if let Some(scheduled_cores) = scheduled_cores { - if let Some(core_idx) = maybe_injected_core_index { - if scheduled_cores.contains(&core_idx) { - scheduled_cores.remove(&core_idx); - backed_candidates_with_core.push((backed_candidate, core_idx)); + if scheduled_cores.len() == 0 { + log::debug!( + target: LOG_TARGET, + "Paraid: {:?} has no scheduled cores but {} candidates were supplied.", + para_id, + backed_candidates.len() + ); + + // Non-elastic scaling case. One core per para. + } else if scheduled_cores.len() == 1 && !core_index_enabled { + backed_candidates_with_core.insert( + para_id, + vec![( + // We need the first one here, as we assume candidates of a para are in + // dependency order. + backed_candidates.into_iter().next().expect("Length is at least 1"), + scheduled_cores.pop_first().expect("Length is 1"), + )], + ); + continue; + + // Elastic scaling case. We only allow candidates which have the right core + // indices injected. + } else if scheduled_cores.len() >= 1 && core_index_enabled { + // We must preserve the dependency order given in the input. + let mut temp_backed_candidates = Vec::with_capacity(scheduled_cores.len()); + + for candidate in backed_candidates { + if scheduled_cores.len() == 0 { + // We've got candidates for all of this para's assigned cores. Move on to + // the next para. + log::debug!( + target: LOG_TARGET, + "Found enough candidates for paraid: {:?}.", + candidate.descriptor().para_id + ); + break; + } + let maybe_injected_core_index: Option = + get_injected_core_index::(allowed_relay_parents, &candidate); + + if let Some(core_index) = maybe_injected_core_index { + if scheduled_cores.remove(&core_index) { + temp_backed_candidates.push((candidate, core_index)); + } else { + // if we got a candidate for a core index which is not scheduled, stop + // the work for this para. 
the already processed candidate chain in + // temp_backed_candidates is still fine though. + log::debug!( + target: LOG_TARGET, + "Found a backed candidate {:?} with injected core index {}, which is not scheduled for paraid {:?}.", + candidate.candidate().hash(), + core_index.0, + candidate.descriptor().para_id + ); + + break; + } + } else { + // if we got a candidate which does not contain its core index, stop the + // work for this para. the already processed candidate chain in + // temp_backed_candidates is still fine though. + + log::debug!( + target: LOG_TARGET, + "Found a backed candidate {:?} with no injected core index, for paraid {:?} which has multiple scheduled cores.", + candidate.candidate().hash(), + candidate.descriptor().para_id + ); + + break; + } } - } else if scheduled_cores.len() == 1 { - backed_candidates_with_core - .push((backed_candidate, scheduled_cores.pop_first().expect("Length is 1"))); + + if !temp_backed_candidates.is_empty() { + backed_candidates_with_core + .entry(para_id) + .or_insert_with(|| vec![]) + .extend(temp_backed_candidates); + } + } else { + log::warn!( + target: LOG_TARGET, + "Found a paraid {:?} which has multiple scheduled cores but ElasticScalingMVP feature is not enabled: {:?}", + para_id, + scheduled_cores + ); } + } else { + log::debug!( + target: LOG_TARGET, + "Paraid: {:?} has no scheduled cores but {} candidates were supplied.", + para_id, + backed_candidates.len() + ); } } @@ -1290,13 +1436,11 @@ fn map_candidates_to_cores( allowed_relay_parents: &AllowedRelayParentsTracker>, candidate: &BackedCandidate, - core_index_enabled: bool, ) -> Option { // After stripping the 8 bit extensions, the `validator_indices` field length is expected // to be equal to backing group size. If these don't match, the `CoreIndex` is badly encoded, // or not supported. - let (validator_indices, maybe_core_idx) = - candidate.validator_indices_and_core_index(core_index_enabled); + let (validator_indices, maybe_core_idx) = candidate.validator_indices_and_core_index(true); let Some(core_idx) = maybe_core_idx else { return None }; @@ -1306,7 +1450,7 @@ fn get_injected_core_index { log::debug!( target: LOG_TARGET, - "Relay parent {:?} for candidate {:?} is not in the allowed relay parents. Dropping the candidate.", + "Relay parent {:?} for candidate {:?} is not in the allowed relay parents.", candidate.descriptor().relay_parent, candidate.candidate().hash(), ); @@ -1323,9 +1467,8 @@ fn get_injected_core_index { log::debug!( target: LOG_TARGET, - "Can't get the group index for core idx {:?}. Dropping the candidate {:?}.", + "Can't get the group index for core idx {:?}.", core_idx, - candidate.candidate().hash(), ); return None }, @@ -1339,6 +1482,14 @@ fn get_injected_core_index MockGenesisConfig { + MockGenesisConfig { + configuration: configuration::GenesisConfig { + config: HostConfiguration { + max_head_data_size: 0b100000, + scheduler_params: SchedulerParams { + group_rotation_frequency: u32::MAX, + ..Default::default() + }, + ..Default::default() + }, + }, + ..Default::default() + } +} + // In order to facilitate benchmarks as tests we have a benchmark feature gated `WeightInfo` impl // that uses 0 for all the weights. Because all the weights are 0, the tests that rely on // weights for limiting data will fail, so we don't run them when using the benchmark feature. 
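The `enter` tests below lean on the chain-forming invariant enforced by `filter_unchained_candidates` further up: each accepted candidate must build on the latest accepted head data of its para, and the first mismatch drops the remainder of the chain. A condensed sketch of that head-data threading, with verification reduced to a parent-head comparison (the real check goes through `CandidateCheckContext::verify_backed_candidate` and also rejects duplicate candidate hashes; all types here are simplified stand-ins):

use std::collections::BTreeMap;

#[derive(Clone, PartialEq)]
struct Head(Vec<u8>);

struct Candidate {
    parent_head: Head, // head data the candidate claims to build on
    output_head: Head, // head data the candidate produces
}

// Keep, per para, the longest prefix of candidates forming a chain that starts
// at the para's on-chain head; the first candidate that does not extend the
// running head is dropped together with everything after it.
fn filter_unchained_sketch(
    candidates: &mut BTreeMap<u32, Vec<Candidate>>,
    latest_heads: &BTreeMap<u32, Head>,
) {
    for (para, chain) in candidates.iter_mut() {
        let Some(mut head) = latest_heads.get(para).cloned() else {
            chain.clear();
            continue;
        };
        let mut keep = 0;
        for candidate in chain.iter() {
            if candidate.parent_head == head {
                head = candidate.output_head.clone();
                keep += 1;
            } else {
                break;
            }
        }
        chain.truncate(keep);
    }
    candidates.retain(|_, c| !c.is_empty());
}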
#[cfg(not(feature = "runtime-benchmarks"))] mod enter { - use super::{inclusion::tests::TestCandidateBuilder, *}; use crate::{ builder::{Bench, BenchBuilder}, - mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test}, + mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, RuntimeOrigin, Test}, scheduler::{ common::{Assignment, AssignmentProvider}, ParasEntry, }, + session_info, }; use alloc::collections::btree_map::BTreeMap; use assert_matches::assert_matches; + use core::panic; use frame_support::assert_ok; use frame_system::limits; - use primitives::vstaging::SchedulerParams; + use primitives::{vstaging::SchedulerParams, AvailabilityBitfield, UncheckedSigned}; use sp_runtime::Perbill; struct TestConfig { @@ -45,6 +68,8 @@ mod enter { num_validators_per_core: u32, code_upgrade: Option, fill_claimqueue: bool, + elastic_paras: BTreeMap, + unavailable_cores: Vec, } fn make_inherent_data( @@ -55,25 +80,39 @@ mod enter { num_validators_per_core, code_upgrade, fill_claimqueue, + elastic_paras, + unavailable_cores, }: TestConfig, ) -> Bench { + let extra_cores = elastic_paras + .values() + .map(|count| *count as usize) + .sum::() + .saturating_sub(elastic_paras.len() as usize); + let total_cores = dispute_sessions.len() + backed_and_concluding.len() + extra_cores; + let builder = BenchBuilder::::new() - .set_max_validators( - (dispute_sessions.len() + backed_and_concluding.len()) as u32 * - num_validators_per_core, - ) + .set_max_validators((total_cores) as u32 * num_validators_per_core) + .set_elastic_paras(elastic_paras.clone()) .set_max_validators_per_core(num_validators_per_core) .set_dispute_statements(dispute_statements) - .set_backed_and_concluding_paras(backed_and_concluding) + .set_backed_and_concluding_paras(backed_and_concluding.clone()) .set_dispute_sessions(&dispute_sessions[..]) - .set_fill_claimqueue(fill_claimqueue); + .set_fill_claimqueue(fill_claimqueue) + .set_unavailable_cores(unavailable_cores); // Setup some assignments as needed: mock_assigner::Pallet::::set_core_count(builder.max_cores()); - for core_index in 0..builder.max_cores() { - // Core index == para_id in this case - mock_assigner::Pallet::::add_test_assignment(Assignment::Bulk(core_index.into())); - } + + (0..(builder.max_cores() as usize - extra_cores)).for_each(|para_id| { + (0..elastic_paras.get(&(para_id as u32)).cloned().unwrap_or(1)).for_each( + |_para_local_core_idx| { + mock_assigner::Pallet::::add_test_assignment(Assignment::Bulk( + para_id.into(), + )); + }, + ); + }); if let Some(code_size) = code_upgrade { builder.set_code_upgrade(code_size).build() @@ -104,6 +143,8 @@ mod enter { num_validators_per_core: 1, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); // We expect the scenario to have cores 0 & 1 with pending availability. The backed @@ -145,6 +186,305 @@ mod enter { Pallet::::on_chain_votes().unwrap().session, 2 ); + + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(0)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(0)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(1)] + ); + }); + } + + #[test] + fn include_backed_candidates_elastic_scaling() { + // ParaId 0 has one pending candidate on core 0. + // ParaId 1 has one pending candidate on core 1. + // ParaId 2 has three pending candidates on cores 2, 3 and 4. 
+ // All of them are being made available in this block. Propose 5 more candidates (one for + // each core) and check that they're successfully backed and the old ones enacted. + let config = default_config(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + new_test_ext(config).execute_with(|| { + // Set the elastic scaling MVP feature. + >::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::ElasticScalingMVP as u8, + true, + ) + .unwrap(); + + let dispute_statements = BTreeMap::new(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + backed_and_concluding.insert(2, 1); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 1, + code_upgrade: None, + fill_claimqueue: false, + elastic_paras: [(2, 3)].into_iter().collect(), + unavailable_cores: vec![], + }); + + let expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators) + assert_eq!(expected_para_inherent_data.bitfields.len(), 5); + // * 1 backed candidate per core (5 cores) + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 5); + // * 0 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 0); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + + // The current schedule is empty prior to calling `create_inherent_enter`. + assert!(>::claimqueue_is_empty()); + + assert!(Pallet::::on_chain_votes().is_none()); + + // Nothing is filtered out (including the backed candidates.) + assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + expected_para_inherent_data + ); + + assert_eq!( + // The length of this vec is equal to the number of candidates, so we know our 5 + // backed candidates did not get filtered out + Pallet::::on_chain_votes().unwrap().backing_validators_per_candidate.len(), + 5 + ); + + assert_eq!( + // The session of the on chain votes should equal the current session, which is 2 + Pallet::::on_chain_votes().unwrap().session, + 2 + ); + + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(0)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(0)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(1)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(2)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(2), CoreIndex(3), CoreIndex(4)] + ); + }); + + // ParaId 0 has one pending candidate on core 0. + // ParaId 1 has one pending candidate on core 1. + // ParaId 2 has 4 pending candidates on cores 2, 3, 4 and 5. + // Cores 1, 2 and 3 are being made available in this block. Propose 6 more candidates (one + // for each core) and check that the right ones are successfully backed and the old ones + // enacted. + let config = default_config(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + new_test_ext(config).execute_with(|| { + // Set the elastic scaling MVP feature. 
+ >::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::ElasticScalingMVP as u8, + true, + ) + .unwrap(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + backed_and_concluding.insert(2, 1); + + // Modify the availability bitfields so that cores 0, 4 and 5 are not being made + // available. + let unavailable_cores = vec![0, 4, 5]; + + let scenario = make_inherent_data(TestConfig { + dispute_statements: BTreeMap::new(), + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 1, + code_upgrade: None, + fill_claimqueue: true, + elastic_paras: [(2, 4)].into_iter().collect(), + unavailable_cores: unavailable_cores.clone(), + }); + + let mut expected_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (6 validators) + assert_eq!(expected_para_inherent_data.bitfields.len(), 6); + // * 1 backed candidate per core (6 cores) + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 6); + // * 0 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 0); + assert!(Pallet::::on_chain_votes().is_none()); + + expected_para_inherent_data.backed_candidates = expected_para_inherent_data + .backed_candidates + .into_iter() + .filter(|candidate| { + let (_, Some(core_index)) = candidate.validator_indices_and_core_index(true) + else { + panic!("Core index must have been injected"); + }; + !unavailable_cores.contains(&core_index.0) + }) + .collect(); + + let mut inherent_data = InherentData::new(); + inherent_data.put_data(PARACHAINS_INHERENT_IDENTIFIER, &scenario.data).unwrap(); + + assert!(!>::claimqueue_is_empty()); + + // The right candidates have been filtered out (the ones for cores 0,4,5) + assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + expected_para_inherent_data + ); + + // 3 candidates have been backed (for cores 1,2 and 3) + assert_eq!( + Pallet::::on_chain_votes().unwrap().backing_validators_per_candidate.len(), + 3 + ); + + assert_eq!( + // The session of the on chain votes should equal the current session, which is 2 + Pallet::::on_chain_votes().unwrap().session, + 2 + ); + + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(1)] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(2)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![CoreIndex(4), CoreIndex(5), CoreIndex(2), CoreIndex(3)] + ); + + let expected_heads = (0..=2) + .map(|id| { + inclusion::PendingAvailability::::get(ParaId::from(id)) + .unwrap() + .back() + .unwrap() + .candidate_commitments() + .head_data + .clone() + }) + .collect::>(); + + // Now just make all candidates available. 
+ let mut data = scenario.data.clone(); + let validators = session_info::Pallet::::session_info(2).unwrap().validators; + let signing_context = SigningContext { + parent_hash: BenchBuilder::::header(4).hash(), + session_index: 2, + }; + + data.backed_candidates.clear(); + + data.bitfields.iter_mut().enumerate().for_each(|(i, bitfield)| { + let unchecked_signed = UncheckedSigned::::benchmark_sign( + validators.get(ValidatorIndex(i as u32)).unwrap(), + bitvec::bitvec![u8, bitvec::order::Lsb0; 1; 6].into(), + &signing_context, + ValidatorIndex(i as u32), + ); + *bitfield = unchecked_signed; + }); + let mut inherent_data = InherentData::new(); + inherent_data.put_data(PARACHAINS_INHERENT_IDENTIFIER, &data).unwrap(); + + // Nothing has been filtered out. + assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + data + ); + + // No more candidates have been backed + assert!(Pallet::::on_chain_votes() + .unwrap() + .backing_validators_per_candidate + .is_empty()); + + // No more pending availability candidates + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(0)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![] + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(2)) + .unwrap() + .into_iter() + .map(|c| c.core_occupied()) + .collect::>(), + vec![] + ); + + // Paras have the right on-chain heads now + expected_heads.into_iter().enumerate().for_each(|(id, head)| { + assert_eq!( + paras::Pallet::::para_head(ParaId::from(id as u32)).unwrap(), + head + ); + }); }); } @@ -255,6 +595,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -326,6 +668,8 @@ mod enter { num_validators_per_core: 6, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -395,6 +739,8 @@ mod enter { num_validators_per_core: 4, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -480,6 +826,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -565,6 +913,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -649,6 +999,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -754,6 +1106,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -820,6 +1174,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = 
scenario.data.clone(); @@ -884,6 +1240,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -985,6 +1343,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let mut para_inherent_data = scenario.data.clone(); @@ -1072,6 +1432,8 @@ mod enter { num_validators_per_core: 5, code_upgrade: None, fill_claimqueue: false, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], }); let expected_para_inherent_data = scenario.data.clone(); @@ -1119,7 +1481,7 @@ mod sanitizers { inclusion::tests::{ back_candidate, collator_sign_candidate, BackingKind, TestCandidateBuilder, }, - mock::{new_test_ext, MockGenesisConfig}, + mock::new_test_ext, }; use bitvec::order::Lsb0; use primitives::{ @@ -1375,9 +1737,11 @@ mod sanitizers { mod candidates { use crate::{ - mock::set_disabled_validators, + mock::{set_disabled_validators, RuntimeOrigin}, scheduler::{common::Assignment, ParasEntry}, + util::{make_persisted_validation_data, make_persisted_validation_data_with_parent}, }; + use primitives::ValidationCode; use alloc::collections::vec_deque::VecDeque; use super::*; @@ -1385,13 +1749,14 @@ mod sanitizers { // Backed candidates and scheduled parachains used for `sanitize_backed_candidates` testing struct TestData { backed_candidates: Vec, - all_backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>, + expected_backed_candidates_with_core: + BTreeMap>, scheduled_paras: BTreeMap>, } - // Generate test data for the candidates and assert that the evnironment is set as expected + // Generate test data for the candidates and assert that the environment is set as expected // (check the comments for details) - fn get_test_data(core_index_enabled: bool) -> TestData { + fn get_test_data_one_core_per_para(core_index_enabled: bool) -> TestData { const RELAY_PARENT_NUM: u32 = 3; // Add the relay parent to `shared` pallet. Otherwise some code (e.g. filtering backing @@ -1467,6 +1832,24 @@ mod sanitizers { ), ])); + // Set the on-chain included head data for paras. + paras::Pallet::::set_current_head(ParaId::from(1), HeadData(vec![1])); + paras::Pallet::::set_current_head(ParaId::from(2), HeadData(vec![2])); + + // Set the current_code_hash + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(1), + ValidationCode(vec![1]), + ) + .unwrap(); + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(2), + ValidationCode(vec![2]), + ) + .unwrap(); + // Callback used for backing candidates let group_validators = |group_index: GroupIndex| { match group_index { @@ -1486,8 +1869,15 @@ mod sanitizers { para_id: ParaId::from(idx1), relay_parent, pov_hash: Hash::repeat_byte(idx1 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(idx1), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![idx1 as u8]), ..Default::default() } .build(); @@ -1523,36 +1913,34 @@ mod sanitizers { ] ); - let all_backed_candidates_with_core = backed_candidates - .iter() - .map(|candidate| { - // Only one entry for this test data. 
- ( - candidate.clone(), - scheduled - .get(&candidate.descriptor().para_id) - .unwrap() - .first() - .copied() - .unwrap(), - ) - }) - .collect(); + let mut expected_backed_candidates_with_core = BTreeMap::new(); + + for candidate in backed_candidates.iter() { + let para_id = candidate.descriptor().para_id; + + expected_backed_candidates_with_core.entry(para_id).or_insert(vec![]).push(( + candidate.clone(), + scheduled.get(¶_id).unwrap().first().copied().unwrap(), + )); + } TestData { backed_candidates, scheduled_paras: scheduled, - all_backed_candidates_with_core, + expected_backed_candidates_with_core, } } - // Generate test data for the candidates and assert that the evnironment is set as expected + // Generate test data for the candidates and assert that the environment is set as expected // (check the comments for details) // Para 1 scheduled on core 0 and core 1. Two candidates are supplied. // Para 2 scheduled on cores 2 and 3. One candidate supplied. // Para 3 scheduled on core 4. One candidate supplied. // Para 4 scheduled on core 5. Two candidates supplied. // Para 5 scheduled on core 6. No candidates supplied. + // Para 6 is not scheduled. One candidate supplied. + // Para 7 is scheduled on core 7 and 8, but the candidate contains the wrong core index. + // Para 8 is scheduled on core 9, but the candidate contains the wrong core index. fn get_test_data_multiple_cores_per_para(core_index_enabled: bool) -> TestData { const RELAY_PARENT_NUM: u32 = 3; @@ -1581,6 +1969,7 @@ mod sanitizers { keyring::Sr25519Keyring::Eve, keyring::Sr25519Keyring::Ferdie, keyring::Sr25519Keyring::One, + keyring::Sr25519Keyring::Two, ]; for validator in validators.iter() { Keystore::sr25519_generate_new( @@ -1605,6 +1994,7 @@ mod sanitizers { vec![ValidatorIndex(4)], vec![ValidatorIndex(5)], vec![ValidatorIndex(6)], + vec![ValidatorIndex(7)], ]); // Update scheduler's claimqueue with the parachains @@ -1658,8 +2048,40 @@ mod sanitizers { RELAY_PARENT_NUM, )]), ), + ( + CoreIndex::from(7), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 7.into(), core_index: CoreIndex(7) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(8), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 7.into(), core_index: CoreIndex(8) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(9), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 8.into(), core_index: CoreIndex(9) }, + RELAY_PARENT_NUM, + )]), + ), ])); + // Set the on-chain included head data and current code hash. 
+ for id in 1..=8u32 { + paras::Pallet::::set_current_head(ParaId::from(id), HeadData(vec![id as u8])); + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(id), + ValidationCode(vec![id as u8]), + ) + .unwrap(); + } + // Callback used for backing candidates let group_validators = |group_index: GroupIndex| { match group_index { @@ -1670,6 +2092,7 @@ mod sanitizers { group_index if group_index == GroupIndex::from(4) => Some(vec![4]), group_index if group_index == GroupIndex::from(5) => Some(vec![5]), group_index if group_index == GroupIndex::from(6) => Some(vec![6]), + group_index if group_index == GroupIndex::from(7) => Some(vec![7]), _ => panic!("Group index out of bounds"), } @@ -1677,7 +2100,7 @@ mod sanitizers { }; let mut backed_candidates = vec![]; - let mut all_backed_candidates_with_core = vec![]; + let mut expected_backed_candidates_with_core = BTreeMap::new(); // Para 1 { @@ -1685,14 +2108,23 @@ mod sanitizers { para_id: ParaId::from(1), relay_parent, pov_hash: Hash::repeat_byte(1 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(1), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + head_data: HeadData(vec![1, 1]), + validation_code: ValidationCode(vec![1]), ..Default::default() } .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + let prev_candidate = candidate.clone(); let backed: BackedCandidate = back_candidate( candidate, &validators, @@ -1704,15 +2136,26 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core.push((backed, CoreIndex(0))); + expected_backed_candidates_with_core + .entry(ParaId::from(1)) + .or_insert(vec![]) + .push((backed, CoreIndex(0))); } let mut candidate = TestCandidateBuilder { para_id: ParaId::from(1), relay_parent, pov_hash: Hash::repeat_byte(2 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![1]), ..Default::default() } .build(); @@ -1730,7 +2173,10 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core.push((backed, CoreIndex(1))); + expected_backed_candidates_with_core + .entry(ParaId::from(1)) + .or_insert(vec![]) + .push((backed, CoreIndex(1))); } } @@ -1740,8 +2186,15 @@ mod sanitizers { para_id: ParaId::from(2), relay_parent, pov_hash: Hash::repeat_byte(3 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(2), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), ..Default::default() } .build(); @@ -1759,7 +2212,10 @@ mod sanitizers { ); backed_candidates.push(backed.clone()); if core_index_enabled { - all_backed_candidates_with_core.push((backed, CoreIndex(2))); + expected_backed_candidates_with_core + .entry(ParaId::from(2)) + .or_insert(vec![]) + .push((backed, CoreIndex(2))); } } @@ -1769,8 +2225,15 @@ mod sanitizers { para_id: ParaId::from(3), relay_parent, pov_hash: Hash::repeat_byte(4 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + 
persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(3), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![3]), ..Default::default() } .build(); @@ -1787,7 +2250,10 @@ mod sanitizers { core_index_enabled.then_some(CoreIndex(4 as u32)), ); backed_candidates.push(backed.clone()); - all_backed_candidates_with_core.push((backed, CoreIndex(4))); + expected_backed_candidates_with_core + .entry(ParaId::from(3)) + .or_insert(vec![]) + .push((backed, CoreIndex(4))); } // Para 4 @@ -1796,14 +2262,22 @@ mod sanitizers { para_id: ParaId::from(4), relay_parent, pov_hash: Hash::repeat_byte(5 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(4), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![4]), ..Default::default() } .build(); collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + let prev_candidate = candidate.clone(); let backed = back_candidate( candidate, &validators, @@ -1811,17 +2285,28 @@ mod sanitizers { &keystore, &signing_context, BackingKind::Threshold, - None, + core_index_enabled.then_some(CoreIndex(5 as u32)), ); backed_candidates.push(backed.clone()); - all_backed_candidates_with_core.push((backed, CoreIndex(5))); + expected_backed_candidates_with_core + .entry(ParaId::from(4)) + .or_insert(vec![]) + .push((backed, CoreIndex(5))); let mut candidate = TestCandidateBuilder { para_id: ParaId::from(4), relay_parent, pov_hash: Hash::repeat_byte(6 as u8), - persisted_validation_data_hash: [42u8; 32].into(), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![4]), ..Default::default() } .build(); @@ -1842,25 +2327,635 @@ mod sanitizers { // No candidate for para 5. - // State sanity checks - assert_eq!( - >::scheduled_paras().collect::>(), - vec![ - (CoreIndex(0), ParaId::from(1)), - (CoreIndex(1), ParaId::from(1)), - (CoreIndex(2), ParaId::from(2)), - (CoreIndex(3), ParaId::from(2)), - (CoreIndex(4), ParaId::from(3)), - (CoreIndex(5), ParaId::from(4)), - (CoreIndex(6), ParaId::from(5)), - ] - ); - let mut scheduled: BTreeMap> = BTreeMap::new(); - for (core_idx, para_id) in >::scheduled_paras() { - scheduled.entry(para_id).or_default().insert(core_idx); + // Para 6. + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(6), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(6), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![6]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(6 as u32)), + ); + backed_candidates.push(backed.clone()); } - assert_eq!( + // Para 7. 
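+ // Backed with core index 6 even though para 7 is scheduled on cores 7 and 8, + // so this candidate should be dropped during sanitization.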
+ { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(7), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(7), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![7]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(6 as u32)), + ); + backed_candidates.push(backed.clone()); + } + + // Para 8. + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(8), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(8), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![8]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(7 as u32)), + ); + backed_candidates.push(backed.clone()); + if !core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(8)) + .or_insert(vec![]) + .push((backed, CoreIndex(9))); + } + } + + // State sanity checks + assert_eq!( + >::scheduled_paras().collect::>(), + vec![ + (CoreIndex(0), ParaId::from(1)), + (CoreIndex(1), ParaId::from(1)), + (CoreIndex(2), ParaId::from(2)), + (CoreIndex(3), ParaId::from(2)), + (CoreIndex(4), ParaId::from(3)), + (CoreIndex(5), ParaId::from(4)), + (CoreIndex(6), ParaId::from(5)), + (CoreIndex(7), ParaId::from(7)), + (CoreIndex(8), ParaId::from(7)), + (CoreIndex(9), ParaId::from(8)), + ] + ); + let mut scheduled: BTreeMap> = BTreeMap::new(); + for (core_idx, para_id) in >::scheduled_paras() { + scheduled.entry(para_id).or_default().insert(core_idx); + } + + assert_eq!( + shared::Pallet::::active_validator_indices(), + vec![ + ValidatorIndex(0), + ValidatorIndex(1), + ValidatorIndex(2), + ValidatorIndex(3), + ValidatorIndex(4), + ValidatorIndex(5), + ValidatorIndex(6), + ValidatorIndex(7), + ] + ); + + TestData { + backed_candidates, + scheduled_paras: scheduled, + expected_backed_candidates_with_core, + } + } + + // Para 1 scheduled on core 0 and core 1. Two candidates are supplied. They form a chain but + // in the wrong order. + // Para 2 scheduled on core 2, core 3 and core 4. Three candidates are supplied. The second + // one is not part of the chain. + // Para 3 scheduled on core 5 and 6. Two candidates are supplied and they all form a chain. + // Para 4 scheduled on core 7 and 8. Duplicated candidates. + fn get_test_data_for_order_checks(core_index_enabled: bool) -> TestData { + const RELAY_PARENT_NUM: u32 = 3; + + // Add the relay parent to `shared` pallet. Otherwise some code (e.g. 
filtering backing + // votes) won't behave correctly + shared::Pallet::::add_allowed_relay_parent( + default_header().hash(), + Default::default(), + RELAY_PARENT_NUM, + 1, + ); + + let header = default_header(); + let relay_parent = header.hash(); + let session_index = SessionIndex::from(0_u32); + + let keystore = LocalKeystore::in_memory(); + let keystore = Arc::new(keystore) as KeystorePtr; + let signing_context = SigningContext { parent_hash: relay_parent, session_index }; + + let validators = vec![ + keyring::Sr25519Keyring::Alice, + keyring::Sr25519Keyring::Bob, + keyring::Sr25519Keyring::Charlie, + keyring::Sr25519Keyring::Dave, + keyring::Sr25519Keyring::Eve, + keyring::Sr25519Keyring::Ferdie, + keyring::Sr25519Keyring::One, + keyring::Sr25519Keyring::Two, + keyring::Sr25519Keyring::AliceStash, + ]; + for validator in validators.iter() { + Keystore::sr25519_generate_new( + &*keystore, + PARACHAIN_KEY_TYPE_ID, + Some(&validator.to_seed()), + ) + .unwrap(); + } + + // Set active validators in `shared` pallet + let validator_ids = + validators.iter().map(|v| v.public().into()).collect::>(); + shared::Pallet::::set_active_validators_ascending(validator_ids); + + // Set the validator groups in `scheduler` + scheduler::Pallet::::set_validator_groups(vec![ + vec![ValidatorIndex(0)], + vec![ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3)], + vec![ValidatorIndex(4)], + vec![ValidatorIndex(5)], + vec![ValidatorIndex(6)], + vec![ValidatorIndex(7)], + vec![ValidatorIndex(8)], + ]); + + // Update scheduler's claimqueue with the parachains + scheduler::Pallet::::set_claimqueue(BTreeMap::from([ + ( + CoreIndex::from(0), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(1), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(2), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(2) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(3), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(4), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(4) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(5), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(5) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(6), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(6) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(7), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(7) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(8), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(8) }, + RELAY_PARENT_NUM, + )]), + ), + ])); + + // Set the on-chain included head data and current code hash. 
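+ // Required for the persisted validation data hashes computed for the + // candidates below.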
+ for id in 1..=4u32 { + paras::Pallet::::set_current_head(ParaId::from(id), HeadData(vec![id as u8])); + paras::Pallet::::force_set_current_code( + RuntimeOrigin::root(), + ParaId::from(id), + ValidationCode(vec![id as u8]), + ) + .unwrap(); + } + + // Callback used for backing candidates + let group_validators = |group_index: GroupIndex| { + match group_index { + group_index if group_index == GroupIndex::from(0) => Some(vec![0]), + group_index if group_index == GroupIndex::from(1) => Some(vec![1]), + group_index if group_index == GroupIndex::from(2) => Some(vec![2]), + group_index if group_index == GroupIndex::from(3) => Some(vec![3]), + group_index if group_index == GroupIndex::from(4) => Some(vec![4]), + group_index if group_index == GroupIndex::from(5) => Some(vec![5]), + group_index if group_index == GroupIndex::from(6) => Some(vec![6]), + group_index if group_index == GroupIndex::from(7) => Some(vec![7]), + group_index if group_index == GroupIndex::from(8) => Some(vec![8]), + + _ => panic!("Group index out of bounds"), + } + .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) + }; + + let mut backed_candidates = vec![]; + let mut expected_backed_candidates_with_core = BTreeMap::new(); + + // Para 1 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(1), + relay_parent, + pov_hash: Hash::repeat_byte(1 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(1), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![1, 1]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![1]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let prev_candidate = candidate.clone(); + let prev_backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(0 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(0 as u32)), + ); + + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(1), + relay_parent, + pov_hash: Hash::repeat_byte(2 as u8), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![1]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(1 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(1 as u32)), + ); + backed_candidates.push(backed.clone()); + backed_candidates.push(prev_backed.clone()); + } + + // Para 2. 
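+ // The second candidate forks from the same on-chain head instead of + // extending the first, and the third builds on the second, so only the + // first candidate is expected to survive the ordering checks.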
+ { + let mut candidate_1 = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(2), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![2, 2]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_1); + + let backed_1: BackedCandidate = back_candidate( + candidate_1, + &validators, + group_validators(GroupIndex::from(2 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(2 as u32)), + ); + + backed_candidates.push(backed_1.clone()); + if core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(2)) + .or_insert(vec![]) + .push((backed_1, CoreIndex(2))); + } + + let mut candidate_2 = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: Hash::repeat_byte(4 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(2), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), + head_data: HeadData(vec![3, 3]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_2); + + let backed_2 = back_candidate( + candidate_2.clone(), + &validators, + group_validators(GroupIndex::from(3 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(3 as u32)), + ); + backed_candidates.push(backed_2.clone()); + + let mut candidate_3 = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: Hash::repeat_byte(5 as u8), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + candidate_2.commitments.head_data, + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![2]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_3); + + let backed_3 = back_candidate( + candidate_3, + &validators, + group_validators(GroupIndex::from(4 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(4 as u32)), + ); + backed_candidates.push(backed_3.clone()); + } + + // Para 3 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(3), + relay_parent, + pov_hash: Hash::repeat_byte(6 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(3), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![3, 3]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![3]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let prev_candidate = candidate.clone(); + let backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(5 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(5 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(3)) + 
.or_insert(vec![]) + .push((backed, CoreIndex(5))); + } + + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(3), + relay_parent, + pov_hash: Hash::repeat_byte(6 as u8), + persisted_validation_data_hash: make_persisted_validation_data_with_parent::< + Test, + >( + RELAY_PARENT_NUM, + Default::default(), + prev_candidate.commitments.head_data, + ) + .hash(), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![3]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(6 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(6 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(3)) + .or_insert(vec![]) + .push((backed, CoreIndex(6))); + } + } + + // Para 4 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(4), + relay_parent, + pov_hash: Hash::repeat_byte(8 as u8), + persisted_validation_data_hash: make_persisted_validation_data::( + ParaId::from(4), + RELAY_PARENT_NUM, + Default::default(), + ) + .unwrap() + .hash(), + head_data: HeadData(vec![4]), + hrmp_watermark: RELAY_PARENT_NUM, + validation_code: ValidationCode(vec![4]), + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed: BackedCandidate = back_candidate( + candidate.clone(), + &validators, + group_validators(GroupIndex::from(7 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(7 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + expected_backed_candidates_with_core + .entry(ParaId::from(4)) + .or_insert(vec![]) + .push((backed, CoreIndex(7))); + } + + let backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(7 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(8 as u32)), + ); + backed_candidates.push(backed.clone()); + } + + // State sanity checks + assert_eq!( + >::scheduled_paras().collect::>(), + vec![ + (CoreIndex(0), ParaId::from(1)), + (CoreIndex(1), ParaId::from(1)), + (CoreIndex(2), ParaId::from(2)), + (CoreIndex(3), ParaId::from(2)), + (CoreIndex(4), ParaId::from(2)), + (CoreIndex(5), ParaId::from(3)), + (CoreIndex(6), ParaId::from(3)), + (CoreIndex(7), ParaId::from(4)), + (CoreIndex(8), ParaId::from(4)), + ] + ); + let mut scheduled: BTreeMap> = BTreeMap::new(); + for (core_idx, para_id) in >::scheduled_paras() { + scheduled.entry(para_id).or_default().insert(core_idx); + } + + assert_eq!( shared::Pallet::::active_validator_indices(), vec![ ValidatorIndex(0), @@ -1870,43 +2965,38 @@ mod sanitizers { ValidatorIndex(4), ValidatorIndex(5), ValidatorIndex(6), + ValidatorIndex(7), + ValidatorIndex(8), ] ); TestData { backed_candidates, scheduled_paras: scheduled, - all_backed_candidates_with_core, + expected_backed_candidates_with_core, } } #[rstest] #[case(false)] #[case(true)] - fn happy_path(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + fn happy_path_one_core_per_para(#[case] core_index_enabled: bool) { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, - 
all_backed_candidates_with_core, + expected_backed_candidates_with_core, scheduled_paras: scheduled, - } = get_test_data(core_index_enabled); - - let has_concluded_invalid = - |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; + } = get_test_data_one_core_per_para(core_index_enabled); assert_eq!( - sanitize_backed_candidates::( + sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + BTreeSet::new(), scheduled, core_index_enabled ), - SanitizedBackedCandidates { - backed_candidates_with_core: all_backed_candidates_with_core, - votes_from_disabled_were_dropped: false, - dropped_unscheduled_candidates: false - } + expected_backed_candidates_with_core, ); }); } @@ -1915,29 +3005,46 @@ mod sanitizers { #[case(false)] #[case(true)] fn test_with_multiple_cores_per_para(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, - all_backed_candidates_with_core: expected_all_backed_candidates_with_core, + expected_backed_candidates_with_core, scheduled_paras: scheduled, } = get_test_data_multiple_cores_per_para(core_index_enabled); - let has_concluded_invalid = - |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; - assert_eq!( - sanitize_backed_candidates::( + sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + BTreeSet::new(), scheduled, core_index_enabled ), - SanitizedBackedCandidates { - backed_candidates_with_core: expected_all_backed_candidates_with_core, - votes_from_disabled_were_dropped: false, - dropped_unscheduled_candidates: true - } + expected_backed_candidates_with_core, + ); + }); + } + + #[rstest] + #[case(false)] + #[case(true)] + fn test_candidate_ordering(#[case] core_index_enabled: bool) { + new_test_ext(default_config()).execute_with(|| { + let TestData { + backed_candidates, + scheduled_paras: scheduled, + expected_backed_candidates_with_core, + } = get_test_data_for_order_checks(core_index_enabled); + + assert_eq!( + sanitize_backed_candidates::( + backed_candidates.clone(), + &>::allowed_relay_parents(), + BTreeSet::new(), + scheduled, + core_index_enabled, + ), + expected_backed_candidates_with_core ); }); } @@ -1952,31 +3059,23 @@ mod sanitizers { #[case] core_index_enabled: bool, #[case] multiple_cores_per_para: bool, ) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, .. 
} = if multiple_cores_per_para { get_test_data_multiple_cores_per_para(core_index_enabled) } else { - get_test_data(core_index_enabled) + get_test_data_one_core_per_para(core_index_enabled) }; let scheduled = BTreeMap::new(); - let has_concluded_invalid = - |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; - - let SanitizedBackedCandidates { - backed_candidates_with_core: sanitized_backed_candidates, - votes_from_disabled_were_dropped, - dropped_unscheduled_candidates, - } = sanitize_backed_candidates::( + + let sanitized_backed_candidates = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + BTreeSet::new(), scheduled, core_index_enabled, ); assert!(sanitized_backed_candidates.is_empty()); - assert!(!votes_from_disabled_were_dropped); - assert!(dropped_unscheduled_candidates); }); } @@ -1984,14 +3083,16 @@ mod sanitizers { #[rstest] #[case(false)] #[case(true)] - fn invalid_are_filtered_out(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + fn concluded_invalid_are_filtered_out_single_core_per_para( + #[case] core_index_enabled: bool, + ) { + new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, scheduled_paras: scheduled, .. } = - get_test_data(core_index_enabled); + get_test_data_one_core_per_para(core_index_enabled); // mark every second one as concluded invalid let set = { - let mut set = std::collections::HashSet::new(); + let mut set = std::collections::BTreeSet::new(); for (idx, backed_candidate) in backed_candidates.iter().enumerate() { if idx & 0x01 == 0 { set.insert(backed_candidate.hash()); @@ -1999,23 +3100,98 @@ mod sanitizers { } set }; - let has_concluded_invalid = - |_idx: usize, candidate: &BackedCandidate| set.contains(&candidate.hash()); - let SanitizedBackedCandidates { - backed_candidates_with_core: sanitized_backed_candidates, - votes_from_disabled_were_dropped, - dropped_unscheduled_candidates, - } = sanitize_backed_candidates::( + let sanitized_backed_candidates: BTreeMap< + ParaId, + Vec<(BackedCandidate<_>, CoreIndex)>, + > = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), - has_concluded_invalid, + set, scheduled, core_index_enabled, ); assert_eq!(sanitized_backed_candidates.len(), backed_candidates.len() / 2); - assert!(!votes_from_disabled_were_dropped); - assert!(!dropped_unscheduled_candidates); + }); + } + + // candidates that have concluded as invalid are filtered out, as well as their descendants. + #[test] + fn concluded_invalid_are_filtered_out_multiple_cores_per_para() { + // Mark the first candidate of paraid 1 as invalid. Its descendant should also + // be dropped. Also mark the candidate of paraid 3 as invalid. + new_test_ext(default_config()).execute_with(|| { + let TestData { + backed_candidates, + scheduled_paras: scheduled, + mut expected_backed_candidates_with_core, + .. 
+ } = get_test_data_multiple_cores_per_para(true); + + let mut invalid_set = std::collections::BTreeSet::new(); + + for (idx, backed_candidate) in backed_candidates.iter().enumerate() { + if backed_candidate.descriptor().para_id == ParaId::from(1) && idx == 0 { + invalid_set.insert(backed_candidate.hash()); + } else if backed_candidate.descriptor().para_id == ParaId::from(3) { + invalid_set.insert(backed_candidate.hash()); + } + } + let sanitized_backed_candidates: BTreeMap< + ParaId, + Vec<(BackedCandidate<_>, CoreIndex)>, + > = sanitize_backed_candidates::( + backed_candidates.clone(), + &>::allowed_relay_parents(), + invalid_set, + scheduled, + true, + ); + + // We'll be left with candidates from paraid 2 and 4. + + expected_backed_candidates_with_core.remove(&ParaId::from(1)).unwrap(); + expected_backed_candidates_with_core.remove(&ParaId::from(3)).unwrap(); + + assert_eq!(sanitized_backed_candidates, expected_backed_candidates_with_core); + }); + + // Mark the second candidate of paraid 1 as invalid. Its predecessor should be left + // in place. + new_test_ext(default_config()).execute_with(|| { + let TestData { + backed_candidates, + scheduled_paras: scheduled, + mut expected_backed_candidates_with_core, + .. + } = get_test_data_multiple_cores_per_para(true); + + let mut invalid_set = std::collections::BTreeSet::new(); + + for (idx, backed_candidate) in backed_candidates.iter().enumerate() { + if backed_candidate.descriptor().para_id == ParaId::from(1) && idx == 1 { + invalid_set.insert(backed_candidate.hash()); + } + } + let sanitized_backed_candidates: BTreeMap< + ParaId, + Vec<(BackedCandidate<_>, CoreIndex)>, + > = sanitize_backed_candidates::( + backed_candidates.clone(), + &>::allowed_relay_parents(), + invalid_set, + scheduled, + true, + ); + + // Only the second candidate of paraid 1 should be removed. + expected_backed_candidates_with_core + .get_mut(&ParaId::from(1)) + .unwrap() + .remove(1); + + // We'll be left with candidates from paraid 1, 2, 3 and 4. + assert_eq!(sanitized_backed_candidates, expected_backed_candidates_with_core); + }); + } @@ -2023,34 +3199,35 @@ #[case(false)] #[case(true)] fn disabled_non_signing_validator_doesnt_get_filtered(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut all_backed_candidates_with_core, .. } = - get_test_data(core_index_enabled); + new_test_ext(default_config()).execute_with(|| { + let TestData { mut expected_backed_candidates_with_core, .. } = + get_test_data_one_core_per_para(core_index_enabled); // Disable Eve set_disabled_validators(vec![4]); - let before = all_backed_candidates_with_core.clone(); + let before = expected_backed_candidates_with_core.clone(); // Eve is disabled but no backing statement is signed by it so nothing should be // filtered - assert!(!filter_backed_statements_from_disabled_validators::( - &mut all_backed_candidates_with_core, + filter_backed_statements_from_disabled_validators::( + &mut expected_backed_candidates_with_core, &>::allowed_relay_parents(), - core_index_enabled - )); - assert_eq!(all_backed_candidates_with_core, before); + core_index_enabled, + ); + assert_eq!(expected_backed_candidates_with_core, before); }); } + #[rstest] #[case(false)] #[case(true)] fn drop_statements_from_disabled_without_dropping_candidate( #[case] core_index_enabled: bool, ) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut all_backed_candidates_with_core, ..
} = - get_test_data(core_index_enabled); + new_test_ext(default_config()).execute_with(|| { + let TestData { mut expected_backed_candidates_with_core, .. } = + get_test_data_one_core_per_para(core_index_enabled); // Disable Alice set_disabled_validators(vec![0]); @@ -2064,11 +3241,22 @@ mod sanitizers { // Verify the initial state is as expected assert_eq!( - all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + expected_backed_candidates_with_core + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .validity_votes() + .len(), 2 ); - let (validator_indices, maybe_core_index) = all_backed_candidates_with_core - .get(0) + let (validator_indices, maybe_core_index) = expected_backed_candidates_with_core + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() .unwrap() .0 .validator_indices_and_core_index(core_index_enabled); @@ -2080,16 +3268,28 @@ mod sanitizers { assert_eq!(validator_indices.get(0).unwrap(), true); assert_eq!(validator_indices.get(1).unwrap(), true); - let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone(); + let untouched = expected_backed_candidates_with_core + .get(&ParaId::from(2)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .clone(); - assert!(filter_backed_statements_from_disabled_validators::( - &mut all_backed_candidates_with_core, + let before = expected_backed_candidates_with_core.clone(); + filter_backed_statements_from_disabled_validators::( + &mut expected_backed_candidates_with_core, &>::allowed_relay_parents(), - core_index_enabled - )); + core_index_enabled, + ); + assert_eq!(before.len(), expected_backed_candidates_with_core.len()); - let (validator_indices, maybe_core_index) = all_backed_candidates_with_core - .get(0) + let (validator_indices, maybe_core_index) = expected_backed_candidates_with_core + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() .unwrap() .0 .validator_indices_and_core_index(core_index_enabled); @@ -2100,47 +3300,137 @@ mod sanitizers { } // there should still be two backed candidates - assert_eq!(all_backed_candidates_with_core.len(), 2); + assert_eq!(expected_backed_candidates_with_core.len(), 2); // but the first one should have only one validity vote assert_eq!( - all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + expected_backed_candidates_with_core + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .validity_votes() + .len(), 1 ); // Validator 0 vote should be dropped, validator 1 - retained assert_eq!(validator_indices.get(0).unwrap(), false); assert_eq!(validator_indices.get(1).unwrap(), true); // the second candidate shouldn't be modified - assert_eq!(all_backed_candidates_with_core.get(1).unwrap().0, untouched); + assert_eq!( + expected_backed_candidates_with_core + .get(&ParaId::from(2)) + .unwrap() + .iter() + .next() + .unwrap() + .0, + untouched + ); }); } #[rstest] #[case(false)] #[case(true)] - fn drop_candidate_if_all_statements_are_from_disabled(#[case] core_index_enabled: bool) { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut all_backed_candidates_with_core, .. } = - get_test_data(core_index_enabled); + fn drop_candidate_if_all_statements_are_from_disabled_single_core_per_para( + #[case] core_index_enabled: bool, + ) { + new_test_ext(default_config()).execute_with(|| { + let TestData { mut expected_backed_candidates_with_core, .. 
} = + get_test_data_one_core_per_para(core_index_enabled); // Disable Alice and Bob set_disabled_validators(vec![0, 1]); // Verify the initial state is as expected assert_eq!( - all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + expected_backed_candidates_with_core + .get(&ParaId::from(1)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .validity_votes() + .len(), 2 ); - let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone(); + let untouched = expected_backed_candidates_with_core + .get(&ParaId::from(2)) + .unwrap() + .iter() + .next() + .unwrap() + .0 + .clone(); - assert!(filter_backed_statements_from_disabled_validators::( - &mut all_backed_candidates_with_core, + filter_backed_statements_from_disabled_validators::( + &mut expected_backed_candidates_with_core, &>::allowed_relay_parents(), - core_index_enabled - )); + core_index_enabled, + ); + + assert_eq!(expected_backed_candidates_with_core.len(), 1); + assert_eq!( + expected_backed_candidates_with_core + .get(&ParaId::from(2)) + .unwrap() + .iter() + .next() + .unwrap() + .0, + untouched + ); + assert_eq!(expected_backed_candidates_with_core.get(&ParaId::from(1)), None); + }); + } + + #[test] + fn drop_candidate_if_all_statements_are_from_disabled_multiple_cores_per_para() { + // Disable Bob, only the second candidate of paraid 1 should be removed. + new_test_ext(default_config()).execute_with(|| { + let TestData { mut expected_backed_candidates_with_core, .. } = + get_test_data_multiple_cores_per_para(true); - assert_eq!(all_backed_candidates_with_core.len(), 1); - assert_eq!(all_backed_candidates_with_core.get(0).unwrap().0, untouched); + set_disabled_validators(vec![1]); + + let mut untouched = expected_backed_candidates_with_core.clone(); + + filter_backed_statements_from_disabled_validators::( + &mut expected_backed_candidates_with_core, + &>::allowed_relay_parents(), + true, + ); + + untouched.get_mut(&ParaId::from(1)).unwrap().remove(1); + + assert_eq!(expected_backed_candidates_with_core, untouched); }); + + // Disable Alice or disable both Alice and Bob, all candidates of paraid 1 should be + // removed. + for disabled in [vec![0], vec![0, 1]] { + new_test_ext(default_config()).execute_with(|| { + let TestData { mut expected_backed_candidates_with_core, .. } = + get_test_data_multiple_cores_per_para(true); + + set_disabled_validators(disabled); + + let mut untouched = expected_backed_candidates_with_core.clone(); + + filter_backed_statements_from_disabled_validators::( + &mut expected_backed_candidates_with_core, + &>::allowed_relay_parents(), + true, + ); + + untouched.remove(&ParaId::from(1)).unwrap(); + + assert_eq!(expected_backed_candidates_with_core, untouched); + }); + } } } } diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs index ba74e488cd3b..ed2e95b3cfa9 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs @@ -26,5 +26,5 @@ //! 2. Move methods from `vstaging` to `v3`. The new stable version should include all methods from //! `vstaging` tagged with the new version number (e.g. all `v3` methods). 
-pub mod v7; +pub mod v10; pub mod vstaging; diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs similarity index 82% rename from polkadot/runtime/parachains/src/runtime_api_impl/v7.rs rename to polkadot/runtime/parachains/src/runtime_api_impl/v10.rs index 15190fcbb558..4cb1b001dcd6 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs @@ -14,7 +14,7 @@ //! A module exporting runtime API implementation functions for all runtime APIs using `v5` //! primitives. //! -//! Runtimes implementing the v2 runtime API are recommended to forward directly to these +//! Runtimes implementing the v10 runtime API are recommended to forward directly to these //! functions. use crate::{ @@ -23,17 +23,19 @@ use crate::{ session_info, shared, }; use alloc::collections::btree_map::BTreeMap; +use frame_support::traits::{GetStorageVersion, StorageVersion}; use frame_system::pallet_prelude::*; use primitives::{ async_backing::{ AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, InboundHrmpLimitations, OutboundHrmpChannelLimitations, }, - slashing, AuthorityDiscoveryId, CandidateEvent, CandidateHash, CommittedCandidateReceipt, - CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCore, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + slashing, ApprovalVotingParams, AuthorityDiscoveryId, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, + GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, + NodeFeatures, OccupiedCore, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use sp_runtime::traits::One; @@ -92,16 +94,41 @@ pub fn availability_cores() -> Vec { - let pending_availability = - >::pending_availability(entry.para_id()) - .expect("Occupied core always has pending availability; qed"); - - let backed_in_number = *pending_availability.backed_in_number(); + // Due to https://github.com/paritytech/polkadot-sdk/issues/64, using the new storage types would cause + // this runtime API to panic. We explicitly handle the storage for version 0 to + // prevent that. When removing the inclusion v0 -> v1 migration, this bit of code + // can also be removed. + let pending_availability = if >::on_chain_storage_version() == + StorageVersion::new(0) + { + inclusion::migration::v0::PendingAvailability::::get(entry.para_id()) + .expect("Occupied core always has pending availability; qed") + } else { + let candidate = >::pending_availability_with_core( + entry.para_id(), + CoreIndex(i as u32), + ) + .expect("Occupied core always has pending availability; qed"); + + // Translate to the old candidate format, as we don't need the commitments now. 
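+ // The legacy v0 struct already carries every field this API reads below + // (descriptor, hash, core, availability votes and timing information).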
+ inclusion::migration::v0::CandidatePendingAvailability { + core: candidate.core_occupied(), + hash: candidate.candidate_hash(), + descriptor: candidate.candidate_descriptor().clone(), + availability_votes: candidate.availability_votes().clone(), + backers: candidate.backers().clone(), + relay_parent_number: candidate.relay_parent_number(), + backed_in_number: candidate.backed_in_number(), + backing_group: candidate.backing_group(), + } + }; + + let backed_in_number = pending_availability.backed_in_number; // Use the same block number for determining the responsible group as what the // backing subsystem would use when it calls validator_groups api. let backing_group_allocation_time = - pending_availability.relay_parent_number() + One::one(); + pending_availability.relay_parent_number + One::one(); CoreState::Occupied(OccupiedCore { next_up_on_available: >::next_up_on_available(CoreIndex( i as u32, @@ -111,13 +138,13 @@ pub fn availability_cores() -> Vec>::next_up_on_time_out(CoreIndex( i as u32, )), - availability: pending_availability.availability_votes().clone(), + availability: pending_availability.availability_votes.clone(), group_responsible: group_responsible_for( backing_group_allocation_time, - pending_availability.core_occupied(), + pending_availability.core, ), - candidate_hash: pending_availability.candidate_hash(), - candidate_descriptor: pending_availability.candidate_descriptor().clone(), + candidate_hash: pending_availability.hash, + candidate_descriptor: pending_availability.descriptor, }) }, CoreOccupied::Free => { @@ -200,8 +227,8 @@ pub fn assumed_validation_data( }; let persisted_validation_data = make_validation_data().or_else(|| { - // Try again with force enacting the core. This check only makes sense if - // the core is occupied. + // Try again with force enacting the pending candidates. This check only makes sense if + // there are any pending candidates. >::pending_availability(para_id).and_then(|_| { >::force_enact(para_id); make_validation_data() @@ -465,27 +492,23 @@ pub fn backing_state( }; let pending_availability = { - // Note: the API deals with a `Vec` as it is future-proof for cases - // where there may be multiple candidates pending availability at a time. - // But at the moment only one candidate can be pending availability per - // parachain. crate::inclusion::PendingAvailability::::get(¶_id) - .and_then(|pending| { - let commitments = - crate::inclusion::PendingAvailabilityCommitments::::get(¶_id); - commitments.map(move |c| (pending, c)) - }) - .map(|(pending, commitments)| { - CandidatePendingAvailability { - candidate_hash: pending.candidate_hash(), - descriptor: pending.candidate_descriptor().clone(), - commitments, - relay_parent_number: pending.relay_parent_number(), - max_pov_size: constraints.max_pov_size, // assume always same in session. - } + .map(|pending_candidates| { + pending_candidates + .into_iter() + .map(|candidate| { + CandidatePendingAvailability { + candidate_hash: candidate.candidate_hash(), + descriptor: candidate.candidate_descriptor().clone(), + commitments: candidate.candidate_commitments().clone(), + relay_parent_number: candidate.relay_parent_number(), + max_pov_size: constraints.max_pov_size, /* assume always same in + * session. 
*/ + } + }) + .collect() }) - .into_iter() - .collect() + .unwrap_or_else(|| vec![]) }; Some(BackingState { constraints, pending_availability }) @@ -495,3 +518,24 @@ pub fn backing_state( pub fn async_backing_params() -> AsyncBackingParams { >::config().async_backing_params } + +/// Implementation for `DisabledValidators` +// CAVEAT: this should only be called on the node side +// as it might produce incorrect results on session boundaries +pub fn disabled_validators() -> Vec +where + T: shared::Config, +{ + >::disabled_validators() +} + +/// Returns the current state of the node features. +pub fn node_features() -> NodeFeatures { + >::config().node_features +} + +/// Approval voting subsystem configuration parameters +pub fn approval_voting_params() -> ApprovalVotingParams { + let config = >::config(); + config.approval_voting_params +} diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index e1cf8991b57e..8dfd28e6d1f3 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -16,31 +16,18 @@ //! Put implementations of functions from staging APIs here. -use crate::{configuration, initializer, shared}; +use crate::scheduler; #[cfg(not(feature = "std"))] use alloc::vec::Vec; -use primitives::{ - vstaging::{ApprovalVotingParams, NodeFeatures}, - ValidatorIndex, -}; - -/// Implementation for `DisabledValidators` -// CAVEAT: this should only be called on the node side -// as it might produce incorrect results on session boundaries -pub fn disabled_validators() -> Vec -where - T: shared::Config, -{ - >::disabled_validators() -} - -/// Returns the current state of the node features. -pub fn node_features() -> NodeFeatures { - >::config().node_features -} - -/// Approval voting subsystem configuration parameteres -pub fn approval_voting_params() -> ApprovalVotingParams { - let config = >::config(); - config.approval_voting_params +use alloc::collections::{btree_map::BTreeMap, vec_deque::VecDeque}; +use primitives::{CoreIndex, Id as ParaId}; + +/// Returns the claimqueue from the scheduler +pub fn claim_queue() -> BTreeMap> { + >::claimqueue() + .into_iter() + .map(|(core_index, entries)| { + (core_index, entries.into_iter().map(|e| e.para_id()).collect()) + }) + .collect() } diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index a2fe3cd7482f..57a4e04b2dee 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -396,16 +396,6 @@ impl Pallet { }); } - /// Get the para (chain or thread) ID assigned to a particular core or index, if any. Core - /// indices out of bounds will return `None`, as will indices of unassigned cores. - pub(crate) fn core_para(core_index: CoreIndex) -> Option { - let cores = AvailabilityCores::::get(); - match cores.get(core_index.0 as usize) { - None | Some(CoreOccupied::Free) => None, - Some(CoreOccupied::Paras(entry)) => Some(entry.para_id()), - } - } - /// Get the validators in the given group, if the group index is valid for this session. 
pub(crate) fn group_validators(group_index: GroupIndex) -> Option> { ValidatorGroups::::get().get(group_index.0 as usize).map(|g| g.clone()) diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index a2f49b43acf8..1118e1de38b2 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -20,12 +20,12 @@ use super::*; use alloc::vec::Vec; use frame_support::{ migrations::VersionedMigration, pallet_prelude::ValueQuery, storage_alias, - traits::OnRuntimeUpgrade, weights::Weight, + traits::UncheckedOnRuntimeUpgrade, weights::Weight, }; /// Old/legacy assignment representation (v0). /// -/// `Assignment` used to be a concrete type with the same layout V0Assignment, idential on all +/// `Assignment` used to be a concrete type with the same layout as `V0Assignment`, identical on all /// assignment providers. This can be removed once storage has been migrated. #[derive(Encode, Decode, RuntimeDebug, TypeInfo, PartialEq, Clone)] struct V0Assignment { @@ -106,7 +106,8 @@ mod v0 { // - Assignments only consist of `ParaId`, `Assignment` is a concrete type (Same as V0Assignment). mod v1 { use frame_support::{ - pallet_prelude::ValueQuery, storage_alias, traits::OnRuntimeUpgrade, weights::Weight, + pallet_prelude::ValueQuery, storage_alias, traits::UncheckedOnRuntimeUpgrade, + weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; @@ -165,7 +166,7 @@ mod v1 { /// Migration to V1 pub struct UncheckedMigrateToV1(core::marker::PhantomData); - impl OnRuntimeUpgrade for UncheckedMigrateToV1 { + impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { let mut weight: Weight = Weight::zero(); @@ -303,7 +304,7 @@ mod v2 { /// Migration to V2 pub struct UncheckedMigrateToV2(core::marker::PhantomData); - impl OnRuntimeUpgrade for UncheckedMigrateToV2 { + impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV2 { fn on_runtime_upgrade() -> Weight { let mut weight: Weight = Weight::zero(); diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs index 22fbd849a536..a4d103f9533d 100644 --- a/polkadot/runtime/parachains/src/scheduler/tests.rs +++ b/polkadot/runtime/parachains/src/scheduler/tests.rs @@ -836,7 +836,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { // #26 now += 1; - // Run to block #n but this time have group 1 conclude the availabilty. + // Run to block #n but this time have group 1 conclude the availability. for n in now..=(now + max_timeouts + 1) { // #n run_to_block(n, |_| None); diff --git a/polkadot/runtime/parachains/src/session_info.rs b/polkadot/runtime/parachains/src/session_info.rs index dcab1d04a5c3..7b74ebca2f82 100644 --- a/polkadot/runtime/parachains/src/session_info.rs +++ b/polkadot/runtime/parachains/src/session_info.rs @@ -158,7 +158,7 @@ impl Pallet { for idx in old_earliest_stored_session..new_earliest_stored_session { Sessions::::remove(&idx); // Idx will be missing for a few sessions after the runtime upgrade. - // But it shouldn'be be a problem. + // But it shouldn't be a problem.
AccountKeys::::remove(&idx); SessionExecutorParams::::remove(&idx); } diff --git a/polkadot/runtime/parachains/src/ump_tests.rs b/polkadot/runtime/parachains/src/ump_tests.rs index 9341a3b4f28d..dc4dbbe0b60d 100644 --- a/polkadot/runtime/parachains/src/ump_tests.rs +++ b/polkadot/runtime/parachains/src/ump_tests.rs @@ -455,7 +455,7 @@ fn verify_relay_dispatch_queue_size_is_externally_accessible() { fn assert_queue_size(para: ParaId, count: u32, size: u32) { #[allow(deprecated)] let raw_queue_size = sp_io::storage::get(&well_known_keys::relay_dispatch_queue_size(para)).expect( - "enqueing a message should create the dispatch queue\ + "enqueuing a message should create the dispatch queue\ and it should be accessible via the well known keys", ); let (c, s) = <(u32, u32)>::decode(&mut &raw_queue_size[..]) @@ -465,7 +465,7 @@ fn assert_queue_size(para: ParaId, count: u32, size: u32) { // Test the deprecated but at least type-safe `relay_dispatch_queue_size_typed`: #[allow(deprecated)] let (c, s) = well_known_keys::relay_dispatch_queue_size_typed(para).get().expect( - "enqueing a message should create the dispatch queue\ + "enqueuing a message should create the dispatch queue\ and it should be accessible via the well known keys", ); assert_eq!((c, s), (count, size)); diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs index 43ca5b3992d6..0585b583b886 100644 --- a/polkadot/runtime/parachains/src/util.rs +++ b/polkadot/runtime/parachains/src/util.rs @@ -21,7 +21,7 @@ use alloc::vec::Vec; use alloc::{collections::btree_set::BTreeSet}; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{Id as ParaId, PersistedValidationData, ValidatorIndex}; +use primitives::{HeadData, Id as ParaId, PersistedValidationData, ValidatorIndex}; use crate::{configuration, hrmp, paras}; @@ -44,6 +44,23 @@ pub fn make_persisted_validation_data( }) } +/// Make the persisted validation data for a particular parachain, a specified relay-parent, its +/// storage root and parent head data. +pub fn make_persisted_validation_data_with_parent( + relay_parent_number: BlockNumberFor, + relay_parent_storage_root: T::Hash, + parent_head: HeadData, +) -> PersistedValidationData> { + let config = >::config(); + + PersistedValidationData { + parent_head, + relay_parent_number, + relay_parent_storage_root, + max_pov_size: config.max_pov_size, + } +} + /// Take an active subset of a set containing all validators. 
/// /// First item in pair will be all items in set have indices found in the `active` indices set (in diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index b7aac0ab7f35..e020bc4d2028 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } serde = { workspace = true } serde_derive = { optional = true, workspace = true } @@ -59,7 +59,6 @@ pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-feat frame-executive = { path = "../../../substrate/frame/executive", default-features = false } pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } -pallet-im-online = { path = "../../../substrate/frame/im-online", default-features = false } pallet-indices = { path = "../../../substrate/frame/indices", default-features = false } pallet-membership = { path = "../../../substrate/frame/membership", default-features = false } pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } @@ -103,6 +102,7 @@ polkadot-parachain-primitives = { path = "../../parachain", default-features = f xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api", default-features = false } [dev-dependencies] tiny-keccak = { version = "2.0.2", features = ["keccak"] } @@ -151,7 +151,6 @@ std = [ "pallet-elections-phragmen/std", "pallet-grandpa/std", "pallet-identity/std", - "pallet-im-online/std", "pallet-indices/std", "pallet-membership/std", "pallet-message-queue/std", @@ -206,6 +205,7 @@ std = [ "tx-pool-api/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] runtime-benchmarks = [ @@ -224,7 +224,6 @@ runtime-benchmarks = [ "pallet-elections-phragmen/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", "pallet-identity/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", "pallet-indices/runtime-benchmarks", "pallet-membership/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", @@ -280,7 +279,6 @@ try-runtime = [ "pallet-elections-phragmen/try-runtime", "pallet-grandpa/try-runtime", "pallet-identity/try-runtime", - "pallet-im-online/try-runtime", "pallet-indices/try-runtime", "pallet-membership/try-runtime", "pallet-message-queue/try-runtime", diff --git a/polkadot/runtime/rococo/README.md b/polkadot/runtime/rococo/README.md index 5b2c296f0ced..c19c3654fe4d 100644 --- a/polkadot/runtime/rococo/README.md +++ b/polkadot/runtime/rococo/README.md @@ -4,7 +4,7 @@ Rococo is a testnet runtime with no stability guarantees. ## How to build `rococo` runtime `EpochDurationInBlocks` parameter is configurable via `ROCOCO_EPOCH_DURATION` environment variable. 
To build wasm -runtime blob with customized epoch duration the following command shall be exectuted: +runtime blob with customized epoch duration the following command shall be executed: ```bash ROCOCO_EPOCH_DURATION=10 ./polkadot/scripts/build-only-wasm.sh rococo-runtime /path/to/output/directory/ ``` diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs index 5c3c191f7bf1..e1ab7b00efc4 100644 --- a/polkadot/runtime/rococo/src/impls.rs +++ b/polkadot/runtime/rococo/src/impls.rs @@ -169,11 +169,16 @@ where }, ]); + let encoded_versioned_xcm = + VersionedXcm::V4(program).encode().try_into().map_err(|error| { + log::error!(target: "runtime::on_reap_identity", "XCM too large, error: {:?}", error); + pallet_xcm::Error::::XcmTooLarge + })?; // send - let _ = >::send( + let _ = >::send_blob( RawOrigin::Root.into(), Box::new(VersionedLocation::V4(destination)), - Box::new(VersionedXcm::V4(program)), + encoded_versioned_xcm, )?; Ok(()) } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index cd4111e221bc..8d3e3cd45770 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -29,13 +29,12 @@ use core::cmp::Ordering; use pallet_nis::WithMaximumOf; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, - OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionInfo, Signature, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, + slashing, AccountId, AccountIndex, ApprovalVotingParams, Balance, BlockNumber, CandidateEvent, + CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, + NodeFeatures, Nonce, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, + SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + PARACHAIN_KEY_TYPE_ID, }; use rococo_runtime_constants::system_parachain::BROKER_ID; use runtime_common::{ @@ -44,7 +43,7 @@ use runtime_common::{ LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedLocationConverter, }, paras_registrar, paras_sudo_wrapper, prod_or_fast, slots, - traits::Leaser, + traits::{Leaser, OnSwap}, BlockHashCount, BlockLength, SlowAdjustingFeeUpdate, }; use scale_info::TypeInfo; @@ -58,9 +57,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, - runtime_api_impl::{ - v7 as parachains_runtime_api_impl, vstaging as parachains_staging_runtime_api_impl, - }, + runtime_api_impl::v10 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -80,7 +77,7 @@ use frame_support::{ InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, StorageMapShim, WithdrawReasons, }, - weights::{ConstantMultiplier, WeightMeter}, + weights::{ConstantMultiplier, WeightMeter, WeightToFee as _}, PalletId, }; use 
frame_system::{EnsureRoot, EnsureSigned}; @@ -96,14 +93,16 @@ use sp_runtime::{ IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, BoundToRuntimeAppPublic, FixedU128, KeyTypeId, Perbill, Percent, Permill, - RuntimeAppPublic, RuntimeDebug, + ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, RuntimeDebug, }; use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use xcm::{latest::prelude::*, VersionedLocation}; +use xcm::{ + latest::prelude::*, IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, + VersionedXcm, +}; use xcm_builder::PayOverXcm; pub use frame_system::Call as SystemCall; @@ -128,6 +127,7 @@ use governance::{ pallet_custom_origins, AuctionAdmin, Fellows, GeneralAdmin, LeaseAdmin, Treasurer, TreasurySpender, }; +use xcm_fee_payment_runtime_api::Error as XcmPaymentApiError; #[cfg(test)] mod tests; @@ -154,7 +154,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 24, @@ -221,7 +221,7 @@ pub struct OriginPrivilegeCmp; impl PrivilegeCmp for OriginPrivilegeCmp { fn cmp_privilege(left: &OriginCaller, right: &OriginCaller) -> Option { if left == right { - return Some(Ordering::Equal) + return Some(Ordering::Equal); } match (left, right) { @@ -351,46 +351,6 @@ impl pallet_authorship::Config for Runtime { type EventHandler = (); } -#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] -pub struct OldSessionKeys { - pub grandpa: ::Public, - pub babe: ::Public, - pub im_online: pallet_im_online::sr25519::AuthorityId, - pub para_validator: ::Public, - pub para_assignment: ::Public, - pub authority_discovery: ::Public, - pub beefy: ::Public, -} - -impl OpaqueKeys for OldSessionKeys { - type KeyTypeIdProviders = (); - fn key_ids() -> &'static [KeyTypeId] { - &[ - <::Public>::ID, - <::Public>::ID, - sp_core::crypto::key_types::IM_ONLINE, - <::Public>::ID, - <::Public>::ID, - <::Public>::ID, - <::Public>::ID, - ] - } - fn get_raw(&self, i: KeyTypeId) -> &[u8] { - match i { - <::Public>::ID => self.grandpa.as_ref(), - <::Public>::ID => self.babe.as_ref(), - sp_core::crypto::key_types::IM_ONLINE => self.im_online.as_ref(), - <::Public>::ID => self.para_validator.as_ref(), - <::Public>::ID => - self.para_assignment.as_ref(), - <::Public>::ID => - self.authority_discovery.as_ref(), - <::Public>::ID => self.beefy.as_ref(), - _ => &[], - } - } -} - impl_opaque_keys! { pub struct SessionKeys { pub grandpa: Grandpa, @@ -402,18 +362,6 @@ impl_opaque_keys! { } } -// remove this when removing `OldSessionKeys` -fn transform_session_keys(_val: AccountId, old: OldSessionKeys) -> SessionKeys { - SessionKeys { - grandpa: old.grandpa, - babe: old.babe, - para_validator: old.para_validator, - para_assignment: old.para_assignment, - authority_discovery: old.authority_discovery, - beefy: old.beefy, - } -} - /// Special `ValidatorIdOf` implementation that is just returning the input as result. 
pub struct ValidatorIdOf; impl sp_runtime::traits::Convert> for ValidatorIdOf { @@ -988,6 +936,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = MessageQueueHeapSize; type MaxStale = MessageQueueMaxStale; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; #[cfg(not(feature = "runtime-benchmarks"))] type MessageProcessor = MessageProcessor; #[cfg(feature = "runtime-benchmarks")] @@ -1083,7 +1032,7 @@ impl paras_registrar::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type OnSwap = (Crowdloan, Slots); + type OnSwap = (Crowdloan, Slots, SwapLeases); type ParaDeposit = ParaDeposit; type DataDepositPerByte = DataDepositPerByte; type WeightInfo = weights::runtime_common_paras_registrar::WeightInfo; @@ -1311,6 +1260,14 @@ impl pallet_asset_rate::Config for Runtime { type BenchmarkHelper = runtime_common::impls::benchmarks::AssetRateArguments; } +// Notify `coretime` pallet when a lease swap occurs +pub struct SwapLeases; +impl OnSwap for SwapLeases { + fn on_swap(one: ParaId, other: ParaId) { + coretime::Pallet::::on_legacy_lease_swap(one, other); + } +} + construct_runtime! { pub enum Runtime { @@ -1481,8 +1438,6 @@ pub mod migrations { use frame_support::traits::LockIdentifier; use frame_system::pallet_prelude::BlockNumberFor; - #[cfg(feature = "try-runtime")] - use sp_core::crypto::ByteArray; pub struct GetLegacyLeaseImpl; impl coretime::migration::GetLegacyLease for GetLegacyLeaseImpl { @@ -1490,11 +1445,11 @@ pub mod migrations { let now = frame_system::Pallet::::block_number(); let lease = slots::Pallet::::lease(para); if lease.is_empty() { - return None + return None; } // Lease not yet started, ignore: if lease.iter().any(Option::is_none) { - return None + return None; } let (index, _) = as Leaser>::lease_period_index(now)?; @@ -1509,7 +1464,6 @@ pub mod migrations { pub const PhragmenElectionPalletName: &'static str = "PhragmenElection"; pub const TechnicalMembershipPalletName: &'static str = "TechnicalMembership"; pub const TipsPalletName: &'static str = "Tips"; - pub const ImOnlinePalletName: &'static str = "ImOnline"; pub const PhragmenElectionPalletId: LockIdentifier = *b"phrelect"; } @@ -1546,77 +1500,6 @@ pub mod migrations { type PalletName = TipsPalletName; } - /// Upgrade Session keys to exclude `ImOnline` key. - /// When this is removed, should also remove `OldSessionKeys`. - pub struct UpgradeSessionKeys; - const UPGRADE_SESSION_KEYS_FROM_SPEC: u32 = 104000; - - impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!(target: "runtime::session_keys", "Skipping session keys migration pre-upgrade check due to spec version (already applied?)"); - return Ok(Vec::new()) - } - - log::info!(target: "runtime::session_keys", "Collecting pre-upgrade session keys state"); - let key_ids = SessionKeys::key_ids(); - frame_support::ensure!( - key_ids.into_iter().find(|&k| *k == sp_core::crypto::key_types::IM_ONLINE) == None, - "New session keys contain the ImOnline key that should have been removed", - ); - let storage_key = pallet_session::QueuedKeys::::hashed_key(); - let mut state: Vec = Vec::new(); - frame_support::storage::unhashed::get::>( - &storage_key, - ) - .ok_or::("Queued keys are not available".into())? 
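Editor's note: on the `type OnSwap = (Crowdloan, Slots, SwapLeases)` wiring and the `SwapLeases` adapter added above, a brief sketch of the trait contract. `OnSwap` comes from `runtime_common::traits` (see the import change earlier in this diff), and a tuple implementation invokes each element in order; the extra handler below is hypothetical:

```rust
use primitives::Id as ParaId;
use runtime_common::traits::OnSwap;

// Hypothetical additional handler, shaped like `SwapLeases` above: every
// member of the `(Crowdloan, Slots, SwapLeases)` tuple receives the same
// callback when two paras swap their leases.
pub struct LogSwaps;
impl OnSwap for LogSwaps {
	fn on_swap(one: ParaId, other: ParaId) {
		log::info!("lease swap between para {:?} and para {:?}", one, other);
	}
}
```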
- .into_iter() - .for_each(|(id, keys)| { - state.extend_from_slice(id.as_slice()); - for key_id in key_ids { - state.extend_from_slice(keys.get_raw(*key_id)); - } - }); - frame_support::ensure!(state.len() > 0, "Queued keys are not empty before upgrade"); - Ok(state) - } - - fn on_runtime_upgrade() -> Weight { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::info!("Skipping session keys upgrade: already applied"); - return ::DbWeight::get().reads(1) - } - log::trace!("Upgrading session keys"); - Session::upgrade_keys::(transform_session_keys); - Perbill::from_percent(50) * BlockWeights::get().max_block - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(old_state: alloc::vec::Vec) -> Result<(), sp_runtime::TryRuntimeError> { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!(target: "runtime::session_keys", "Skipping session keys migration post-upgrade check due to spec version (already applied?)"); - return Ok(()) - } - - let key_ids = SessionKeys::key_ids(); - let mut new_state: Vec = Vec::new(); - pallet_session::QueuedKeys::::get().into_iter().for_each(|(id, keys)| { - new_state.extend_from_slice(id.as_slice()); - for key_id in key_ids { - new_state.extend_from_slice(keys.get_raw(*key_id)); - } - }); - frame_support::ensure!(new_state.len() > 0, "Queued keys are not empty after upgrade"); - frame_support::ensure!( - old_state == new_state, - "Pre-upgrade and post-upgrade keys do not match!" - ); - log::info!(target: "runtime::session_keys", "Session keys migrated successfully"); - Ok(()) - } - } - // We don't have a limit in the Relay Chain. const IDENTITY_MIGRATION_KEY_LIMIT: u64 = u64::MAX; @@ -1650,21 +1533,18 @@ pub mod migrations { pallet_grandpa::migrations::MigrateV4ToV5, parachains_configuration::migration::v10::MigrateToV10, - // Upgrade `SessionKeys` to exclude `ImOnline` - UpgradeSessionKeys, - - // Remove `im-online` pallet on-chain storage - frame_support::migrations::RemovePallet::DbWeight>, - // Migrate Identity pallet for Usernames pallet_identity::migration::versioned::V0ToV1, parachains_configuration::migration::v11::MigrateToV11, // This needs to come after the `parachains_configuration` above as we are reading the configuration. coretime::migration::MigrateToCoretime, parachains_configuration::migration::v12::MigrateToV12, + parachains_assigner_on_demand::migration::MigrateV0ToV1, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, + + parachains_inclusion::migration::MigrateToV1, ); } @@ -1777,6 +1657,37 @@ sp_api::impl_runtime_apis! 
{
 		}
 	}
+	impl xcm_fee_payment_runtime_api::XcmPaymentApi<Block> for Runtime {
+		fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> {
+			if !matches!(xcm_version, 3 | 4) {
+				return Err(XcmPaymentApiError::UnhandledXcmVersion);
+			}
+			Ok([VersionedAssetId::V4(xcm_config::TokenLocation::get().into())]
+				.into_iter()
+				.filter_map(|asset| asset.into_version(xcm_version).ok())
+				.collect())
+		}
+
+		fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> {
+			let local_asset = VersionedAssetId::V4(xcm_config::TokenLocation::get().into());
+			let asset = asset
+				.into_version(4)
+				.map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?;
+
+			if asset != local_asset { return Err(XcmPaymentApiError::AssetNotFound); }
+
+			Ok(WeightToFee::weight_to_fee(&weight))
+		}
+
+		fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, XcmPaymentApiError> {
+			XcmPallet::query_xcm_weight(message)
+		}
+
+		fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, XcmPaymentApiError> {
+			XcmPallet::query_delivery_fees(destination, message)
+		}
+	}
+
 	impl sp_api::Metadata<Block> for Runtime {
 		fn metadata() -> OpaqueMetadata {
 			OpaqueMetadata::new(Runtime::metadata().into())
@@ -1824,12 +1735,6 @@ sp_api::impl_runtime_apis! {
 	impl offchain_primitives::OffchainWorkerApi<Block> for Runtime {
 		fn offchain_worker(header: &<Block as BlockT>::Header) {
-			use sp_runtime::{traits::Header, DigestItem};
-
-			if header.digest().logs().iter().any(|di| di == &DigestItem::RuntimeEnvironmentUpdated) {
-				pallet_im_online::migration::clear_offchain_storage(Session::validators().len() as u32);
-			}
-
 			Executive::offchain_worker(header)
 		}
 	}
@@ -1979,22 +1884,22 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn approval_voting_params() -> ApprovalVotingParams {
-			parachains_staging_runtime_api_impl::approval_voting_params::<Runtime>()
+			parachains_runtime_api_impl::approval_voting_params::<Runtime>()
 		}
 
 		fn disabled_validators() -> Vec<ValidatorIndex> {
-			parachains_staging_runtime_api_impl::disabled_validators::<Runtime>()
+			parachains_runtime_api_impl::disabled_validators::<Runtime>()
 		}
 
 		fn node_features() -> NodeFeatures {
-			parachains_staging_runtime_api_impl::node_features::<Runtime>()
+			parachains_runtime_api_impl::node_features::<Runtime>()
 		}
 	}
 
 	#[api_version(3)]
 	impl beefy_primitives::BeefyApi<Block, BeefyId> for Runtime {
 		fn beefy_genesis() -> Option<BlockNumber> {
-			Beefy::genesis_block()
+			pallet_beefy::GenesisBlock::<Runtime>::get()
 		}
 
 		fn validator_set() -> Option<beefy_primitives::ValidatorSet<BeefyId>> {
@@ -2032,11 +1937,11 @@ sp_api::impl_runtime_apis! {
 	#[api_version(2)]
 	impl mmr::MmrApi<Block, mmr::Hash, BlockNumber> for Runtime {
 		fn mmr_root() -> Result<mmr::Hash, mmr::Error> {
-			Ok(Mmr::mmr_root())
+			Ok(pallet_mmr::RootHash::<Runtime>::get())
 		}
 
 		fn mmr_leaf_count() -> Result<mmr::LeafIndex, mmr::Error> {
-			Ok(Mmr::mmr_leaves())
+			Ok(pallet_mmr::NumberOfLeaves::<Runtime>::get())
 		}
 
 		fn generate_proof(
@@ -2485,7 +2390,7 @@ mod remote_tests {
 	#[tokio::test]
 	async fn run_migrations() {
 		if var("RUN_MIGRATION_TESTS").is_err() {
-			return
+			return;
 		}
 
 		sp_tracing::try_init_simple();
diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs
index 5544ca44658c..42972baa1c83 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//!
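Editor's note: a sketch of how a node-side tool might drive the new `XcmPaymentApi` implemented above. The client plumbing (`client`, `best_hash`) is assumed, and the call shapes follow the `sp_api`-generated callers, which wrap each result in an outer `ApiError`; this is illustrative, not verified against a real node:

```rust
use xcm::{latest::prelude::*, VersionedAssetId, VersionedXcm};

// Assumed in scope: `client: Arc<impl ProvideRuntimeApi<Block>>` and the
// best block hash `best_hash`.
let api = client.runtime_api();
let msg = VersionedXcm::<()>::V4(Xcm(vec![ClearOrigin]));

// 1. Ask the runtime how much weight the program would consume.
let weight = api
	.query_xcm_weight(best_hash, msg)
	.expect("node-side API error")          // outer `sp_api::ApiError`
	.expect("runtime could not weigh msg"); // inner `XcmPaymentApiError`

// 2. Convert that weight into a fee in the relay's native asset (`Here`).
let native = VersionedAssetId::V4(AssetId(Location::here()));
let fee: u128 = api
	.query_weight_to_asset_fee(best_hash, weight, native)
	.expect("node-side API error")
	.expect("asset not acceptable for fee payment");
```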
HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -60,8 +60,26 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 25_043_000 picoseconds. - Weight::from_parts(25_682_000, 0) + // Minimum execution time: 24_724_000 picoseconds. + Weight::from_parts(25_615_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 24_709_000 picoseconds. + Weight::from_parts(25_326_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -80,8 +98,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 107_570_000 picoseconds. - Weight::from_parts(109_878_000, 0) + // Minimum execution time: 106_600_000 picoseconds. + Weight::from_parts(110_781_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -100,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `232` // Estimated: `3697` - // Minimum execution time: 106_341_000 picoseconds. - Weight::from_parts(109_135_000, 0) + // Minimum execution time: 103_030_000 picoseconds. + Weight::from_parts(106_018_000, 0) .saturating_add(Weight::from_parts(0, 3697)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -120,8 +138,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 108_372_000 picoseconds. - Weight::from_parts(112_890_000, 0) + // Minimum execution time: 107_017_000 picoseconds. + Weight::from_parts(109_214_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -130,8 +148,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_957_000 picoseconds. - Weight::from_parts(7_417_000, 0) + // Minimum execution time: 6_864_000 picoseconds. 
+ Weight::from_parts(7_135_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_955_000 picoseconds. + Weight::from_parts(7_165_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) @@ -140,8 +166,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_053_000 picoseconds. - Weight::from_parts(7_462_000, 0) + // Minimum execution time: 6_827_000 picoseconds. + Weight::from_parts(7_211_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -149,8 +175,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_918_000 picoseconds. - Weight::from_parts(2_037_000, 0) + // Minimum execution time: 1_788_000 picoseconds. + Weight::from_parts(2_021_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -171,8 +197,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 30_417_000 picoseconds. - Weight::from_parts(31_191_000, 0) + // Minimum execution time: 30_627_000 picoseconds. + Weight::from_parts(31_350_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -193,8 +219,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `360` // Estimated: `3825` - // Minimum execution time: 36_666_000 picoseconds. - Weight::from_parts(37_779_000, 0) + // Minimum execution time: 36_688_000 picoseconds. + Weight::from_parts(37_345_000, 0) .saturating_add(Weight::from_parts(0, 3825)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -205,8 +231,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_869_000 picoseconds. - Weight::from_parts(2_003_000, 0) + // Minimum execution time: 1_829_000 picoseconds. + Weight::from_parts(1_986_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -216,8 +242,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `13387` - // Minimum execution time: 16_188_000 picoseconds. - Weight::from_parts(16_435_000, 0) + // Minimum execution time: 16_104_000 picoseconds. + Weight::from_parts(16_464_000, 0) .saturating_add(Weight::from_parts(0, 13387)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -228,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `26` // Estimated: `13391` - // Minimum execution time: 16_431_000 picoseconds. - Weight::from_parts(16_935_000, 0) + // Minimum execution time: 16_267_000 picoseconds. 
+ Weight::from_parts(16_675_000, 0) .saturating_add(Weight::from_parts(0, 13391)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -240,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `40` // Estimated: `15880` - // Minimum execution time: 18_460_000 picoseconds. - Weight::from_parts(18_885_000, 0) + // Minimum execution time: 18_487_000 picoseconds. + Weight::from_parts(19_102_000, 0) .saturating_add(Weight::from_parts(0, 15880)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -259,8 +285,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `216` // Estimated: `6156` - // Minimum execution time: 29_623_000 picoseconds. - Weight::from_parts(30_661_000, 0) + // Minimum execution time: 29_603_000 picoseconds. + Weight::from_parts(31_002_000, 0) .saturating_add(Weight::from_parts(0, 6156)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -271,8 +297,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `69` // Estimated: `10959` - // Minimum execution time: 12_043_000 picoseconds. - Weight::from_parts(12_360_000, 0) + // Minimum execution time: 12_183_000 picoseconds. + Weight::from_parts(12_587_000, 0) .saturating_add(Weight::from_parts(0, 10959)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -282,8 +308,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `33` // Estimated: `13398` - // Minimum execution time: 16_511_000 picoseconds. - Weight::from_parts(17_011_000, 0) + // Minimum execution time: 16_372_000 picoseconds. + Weight::from_parts(16_967_000, 0) .saturating_add(Weight::from_parts(0, 13398)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -302,8 +328,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `216` // Estimated: `13581` - // Minimum execution time: 39_041_000 picoseconds. - Weight::from_parts(39_883_000, 0) + // Minimum execution time: 38_904_000 picoseconds. + Weight::from_parts(39_983_000, 0) .saturating_add(Weight::from_parts(0, 13581)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) @@ -316,8 +342,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_030_000 picoseconds. - Weight::from_parts(2_150_000, 0) + // Minimum execution time: 2_067_000 picoseconds. + Weight::from_parts(2_195_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -328,8 +354,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 22_615_000 picoseconds. - Weight::from_parts(23_008_000, 0) + // Minimum execution time: 23_982_000 picoseconds. + Weight::from_parts(24_409_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -340,8 +366,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `23` // Estimated: `3488` - // Minimum execution time: 34_438_000 picoseconds. - Weight::from_parts(35_514_000, 0) + // Minimum execution time: 33_430_000 picoseconds. 
+ Weight::from_parts(34_433_000, 0) .saturating_add(Weight::from_parts(0, 3488)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs index ac0f05301b48..dba9e7904c79 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `runtime_parachains::assigner_on_demand` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -31,11 +31,11 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::assigner_on_demand // --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,44 +48,44 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::assigner_on_demand`. pub struct WeightInfo(PhantomData); impl runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo { - /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0) - /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Paras::ParaLifecycles` (r:1 w:0) - /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1) - /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 9999]`. fn place_order_keep_alive(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `297 + s * (4 ±0)` - // Estimated: `3762 + s * (4 ±0)` - // Minimum execution time: 33_522_000 picoseconds. 
- Weight::from_parts(35_436_835, 0) - .saturating_add(Weight::from_parts(0, 3762)) - // Standard Error: 129 - .saturating_add(Weight::from_parts(14_041, 0).saturating_mul(s.into())) + // Measured: `218 + s * (8 ±0)` + // Estimated: `3681 + s * (8 ±0)` + // Minimum execution time: 21_053_000 picoseconds. + Weight::from_parts(17_291_897, 0) + .saturating_add(Weight::from_parts(0, 3681)) + // Standard Error: 104 + .saturating_add(Weight::from_parts(18_779, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } - /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0) - /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Paras::ParaLifecycles` (r:1 w:0) - /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1) - /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 9999]`. fn place_order_allow_death(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `297 + s * (4 ±0)` - // Estimated: `3762 + s * (4 ±0)` - // Minimum execution time: 33_488_000 picoseconds. - Weight::from_parts(34_848_934, 0) - .saturating_add(Weight::from_parts(0, 3762)) - // Standard Error: 143 - .saturating_add(Weight::from_parts(14_215, 0).saturating_mul(s.into())) + // Measured: `218 + s * (8 ±0)` + // Estimated: `3681 + s * (8 ±0)` + // Minimum execution time: 20_843_000 picoseconds. + Weight::from_parts(16_881_986, 0) + .saturating_add(Weight::from_parts(0, 3681)) + // Standard Error: 104 + .saturating_add(Weight::from_parts(18_788, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs index 4edbed98f2da..c250c86665be 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs @@ -13,29 +13,34 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . + //! Autogenerated weights for `runtime_parachains::paras_inherent` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2021-11-20, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 128 +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/release/polkadot +// target/production/polkadot // benchmark -// --chain=rococo-dev +// pallet // --steps=50 // --repeat=20 -// --pallet=runtime_parachains::paras_inherent // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs -// --header=./file_header.txt +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=runtime_parachains::paras_inherent +// --chain=rococo-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::Weight}; use core::marker::PhantomData; @@ -43,131 +48,287 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::paras_inherent`. pub struct WeightInfo(PhantomData); impl runtime_parachains::paras_inherent::WeightInfo for WeightInfo { - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParaSessionInfo Sessions (r:1 w:0) - // Storage: ParasDisputes Disputes (r:1 w:1) - // Storage: ParasDisputes Included (r:1 w:1) - // Storage: ParasDisputes SpamSlots (r:1 w:1) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + 
/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaSessionInfo::Sessions` (r:1 w:0) + /// Proof: `ParaSessionInfo::Sessions` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:1) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::BackersOnDisputes` (r:1 w:1) + /// Proof: `ParasDisputes::BackersOnDisputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:1 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `v` is `[10, 200]`. fn enter_variable_disputes(v: u32, ) -> Weight { - Weight::from_parts(352_590_000 as u64, 0) - // Standard Error: 13_000 - .saturating_add(Weight::from_parts(49_254_000 as u64, 0).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().reads(24 as u64)) - .saturating_add(T::DbWeight::get().writes(16 as u64)) + // Proof Size summary in bytes: + // Measured: `67785` + // Estimated: `73725 + v * (23 ±0)` + // Minimum execution time: 949_716_000 picoseconds. + Weight::from_parts(482_361_515, 0) + .saturating_add(Weight::from_parts(0, 73725)) + // Standard Error: 17_471 + .saturating_add(Weight::from_parts(50_100_764, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(25)) + .saturating_add(T::DbWeight::get().writes(15)) + .saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into())) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:1 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInclusion AvailabilityBitfields (r:0 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: ParasDisputes Included (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), 
`max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_bitfields() -> Weight { - Weight::from_parts(299_878_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(21 as u64)) - .saturating_add(T::DbWeight::get().writes(15 as u64)) + // Proof Size summary in bytes: + // Measured: `42757` + // Estimated: `48697` + // Minimum execution time: 437_627_000 picoseconds. 
+ Weight::from_parts(460_975_000, 0) + .saturating_add(Weight::from_parts(0, 48697)) + .saturating_add(T::DbWeight::get().reads(23)) + .saturating_add(T::DbWeight::get().writes(15)) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:2 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Paras PastCodeMeta (r:1 w:0) - // Storage: Paras CurrentCodeHash (r:1 w:0) - // Storage: Ump RelayDispatchQueueSize (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: ParasDisputes Included (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) - fn enter_backed_candidates_variable(_v: u32) -> Weight { - Weight::from_parts(442_472_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(25 as u64)) - .saturating_add(T::DbWeight::get().writes(14 as u64)) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: 
`Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `v` is `[101, 200]`. + fn enter_backed_candidates_variable(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `42829` + // Estimated: `48769` + // Minimum execution time: 1_305_254_000 picoseconds. 
+ Weight::from_parts(1_347_160_667, 0) + .saturating_add(Weight::from_parts(0, 48769)) + // Standard Error: 22_128 + .saturating_add(Weight::from_parts(57_229, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(26)) + .saturating_add(T::DbWeight::get().writes(15)) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:2 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Paras PastCodeMeta (r:1 w:0) - // Storage: Paras CurrentCodeHash (r:1 w:0) - // Storage: Ump RelayDispatchQueueSize (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: ParasDisputes Included (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: 
`Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:0) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_backed_candidate_code_upgrade() -> Weight { - Weight::from_parts(36_903_411_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(25 as u64)) - .saturating_add(T::DbWeight::get().writes(14 as u64)) + // Proof Size summary in bytes: + // Measured: `42842` + // Estimated: `48782` + // Minimum execution time: 38_637_547_000 picoseconds. 
+ Weight::from_parts(41_447_412_000, 0) + .saturating_add(Weight::from_parts(0, 48782)) + .saturating_add(T::DbWeight::get().reads(28)) + .saturating_add(T::DbWeight::get().writes(15)) } } diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 67f34916fe78..c7063bd7ad61 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -221,6 +221,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } parameter_types! { @@ -256,11 +259,9 @@ pub type LocalPalletOriginToLocation = ( impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; - // We only allow the root, fellows and the staking admin to send messages. - // This is basically safe to enable for everyone (safe the possibility of someone spamming the - // parachain if they're willing to pay the KSM to send from the Relay-chain), but it's useless - // until we bring in XCM v3 which will make `DescendOrigin` a bit more useful. - type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; + // Note that this configuration of `SendXcmOrigin` is different from the one present in + // production. + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmRouter = XcmRouter; // Anyone can execute XCM messages locally. type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 78fc29703a28..b2391e1681cb 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -15,7 +15,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { workspace = true } serde_derive = { optional = true, workspace = true } smallvec = "1.8.0" diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index c5a34ee03ec9..9b2478297869 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -34,7 +34,7 @@ use polkadot_runtime_parachains::{ disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, - paras_inherent as parachains_paras_inherent, runtime_api_impl::v7 as runtime_impl, + paras_inherent as parachains_paras_inherent, runtime_api_impl::v10 as runtime_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -833,6 +833,7 @@ sp_api::impl_runtime_apis! { } } + #[api_version(10)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { runtime_impl::validators::() @@ -960,6 +961,30 @@ sp_api::impl_runtime_apis! 
{ key_ownership_proof, ) } + + fn minimum_backing_votes() -> u32 { + runtime_impl::minimum_backing_votes::() + } + + fn para_backing_state(para_id: ParaId) -> Option { + runtime_impl::backing_state::(para_id) + } + + fn async_backing_params() -> primitives::AsyncBackingParams { + runtime_impl::async_backing_params::() + } + + fn approval_voting_params() -> primitives::ApprovalVotingParams { + runtime_impl::approval_voting_params::() + } + + fn disabled_validators() -> Vec { + runtime_impl::disabled_validators::() + } + + fn node_features() -> primitives::NodeFeatures { + runtime_impl::node_features::() + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/polkadot/runtime/test-runtime/src/xcm_config.rs b/polkadot/runtime/test-runtime/src/xcm_config.rs index edd6ee1a688f..dd95bd8f4cfd 100644 --- a/polkadot/runtime/test-runtime/src/xcm_config.rs +++ b/polkadot/runtime/test-runtime/src/xcm_config.rs @@ -153,6 +153,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } impl pallet_xcm::Config for crate::Runtime { diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index 7f8d2c60a565..0f88dbe591f0 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } serde = { workspace = true } @@ -44,7 +44,7 @@ sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", def frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false } frame-executive = { path = "../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["tuples-96"] } +frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["experimental", "tuples-96"] } frame-system = { path = "../../../substrate/frame/system", default-features = false } frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } westend-runtime-constants = { package = "westend-runtime-constants", path = "constants", default-features = false } @@ -63,7 +63,6 @@ pallet-election-provider-multi-phase = { path = "../../../substrate/frame/electi pallet-fast-unstake = { path = "../../../substrate/frame/fast-unstake", default-features = false } pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } -pallet-im-online = { path = "../../../substrate/frame/im-online", default-features = false } pallet-indices = { path = "../../../substrate/frame/indices", default-features = false } pallet-membership = { path = "../../../substrate/frame/membership", default-features = false } 
pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } @@ -113,6 +112,7 @@ runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parac xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api", default-features = false } [dev-dependencies] hex-literal = "0.4.1" @@ -165,7 +165,6 @@ std = [ "pallet-fast-unstake/std", "pallet-grandpa/std", "pallet-identity/std", - "pallet-im-online/std", "pallet-indices/std", "pallet-membership/std", "pallet-message-queue/std", @@ -225,6 +224,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] runtime-benchmarks = [ @@ -247,7 +247,6 @@ runtime-benchmarks = [ "pallet-fast-unstake/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", "pallet-identity/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", "pallet-indices/runtime-benchmarks", "pallet-membership/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", @@ -306,7 +305,6 @@ try-runtime = [ "pallet-fast-unstake/try-runtime", "pallet-grandpa/try-runtime", "pallet-identity/try-runtime", - "pallet-im-online/try-runtime", "pallet-indices/try-runtime", "pallet-membership/try-runtime", "pallet-message-queue/try-runtime", diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs index 23dbbc645071..8cbb014a2e90 100644 --- a/polkadot/runtime/westend/src/impls.rs +++ b/polkadot/runtime/westend/src/impls.rs @@ -169,11 +169,16 @@ where }, ]); + let encoded_versioned_xcm = + VersionedXcm::V4(program).encode().try_into().map_err(|error| { + log::error!(target: "runtime::on_reap_identity", "XCM too large, error: {:?}", error); + pallet_xcm::Error::::XcmTooLarge + })?; // send - let _ = >::send( + let _ = >::send_blob( RawOrigin::Root.into(), Box::new(VersionedLocation::V4(destination)), - Box::new(VersionedXcm::V4(program)), + encoded_versioned_xcm, )?; Ok(()) } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index fa70058b0c10..19b7f4a71ecb 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -17,7 +17,7 @@ //! The Westend runtime. This can be compiled with `#[no_std]`, ready for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit. +// `#[frame_support::runtime]` does a lot of recursion and requires us to increase the limit.
#![recursion_limit = "512"] extern crate alloc; @@ -32,7 +32,7 @@ use beefy_primitives::{ }; use frame_election_provider_support::{bounds::ElectionBoundsBuilder, onchain, SequentialPhragmen}; use frame_support::{ - construct_runtime, derive_impl, + derive_impl, genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ @@ -40,7 +40,7 @@ use frame_support::{ InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, ProcessMessage, ProcessMessageError, WithdrawReasons, }, - weights::{ConstantMultiplier, WeightMeter}, + weights::{ConstantMultiplier, WeightMeter, WeightToFee as _}, PalletId, }; use frame_system::{EnsureRoot, EnsureSigned}; @@ -50,14 +50,12 @@ use pallet_session::historical as session_historical; use pallet_transaction_payment::{CurrencyAdapter, FeeDetails, RuntimeDispatchInfo}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, PARACHAIN_KEY_TYPE_ID, + slashing, AccountId, AccountIndex, ApprovalVotingParams, Balance, BlockNumber, CandidateEvent, + CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, + NodeFeatures, Nonce, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, + ValidatorIndex, ValidatorSignature, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ assigned_slots, auctions, crowdloan, @@ -67,7 +65,7 @@ use runtime_common::{ LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedLocationConverter, }, paras_registrar, paras_sudo_wrapper, prod_or_fast, slots, - traits::Leaser, + traits::{Leaser, OnSwap}, BalanceToU256, BlockHashCount, BlockLength, CurrencyToVote, SlowAdjustingFeeUpdate, U256ToBalance, }; @@ -80,9 +78,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::{ - v7 as parachains_runtime_api_impl, vstaging as parachains_staging_runtime_api_impl, - }, + runtime_api_impl::v10 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -97,19 +93,20 @@ use sp_runtime::{ Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, BoundToRuntimeAppPublic, FixedU128, KeyTypeId, Perbill, Percent, Permill, - RuntimeAppPublic, + ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, }; use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; use xcm::{ - latest::{InteriorLocation, Junction, Junction::PalletInstance}, - VersionedLocation, + latest::prelude::*, IntoVersion, 
VersionedAssetId, VersionedAssets, VersionedLocation, + VersionedXcm, }; use xcm_builder::PayOverXcm; +use xcm_fee_payment_runtime_api::Error as XcmPaymentApiError; + pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; pub use pallet_election_provider_multi_phase::{Call as EPMCall, GeometricDepositBase}; @@ -154,7 +151,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 1_008_000, + spec_version: 1_009_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 24, @@ -411,46 +408,6 @@ parameter_types! { pub const Offset: BlockNumber = 0; } -#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] -pub struct OldSessionKeys { - pub grandpa: ::Public, - pub babe: ::Public, - pub im_online: pallet_im_online::sr25519::AuthorityId, - pub para_validator: ::Public, - pub para_assignment: ::Public, - pub authority_discovery: ::Public, - pub beefy: ::Public, -} - -impl OpaqueKeys for OldSessionKeys { - type KeyTypeIdProviders = (); - fn key_ids() -> &'static [KeyTypeId] { - &[ - <::Public>::ID, - <::Public>::ID, - sp_core::crypto::key_types::IM_ONLINE, - <::Public>::ID, - <::Public>::ID, - <::Public>::ID, - <::Public>::ID, - ] - } - fn get_raw(&self, i: KeyTypeId) -> &[u8] { - match i { - <::Public>::ID => self.grandpa.as_ref(), - <::Public>::ID => self.babe.as_ref(), - sp_core::crypto::key_types::IM_ONLINE => self.im_online.as_ref(), - <::Public>::ID => self.para_validator.as_ref(), - <::Public>::ID => - self.para_assignment.as_ref(), - <::Public>::ID => - self.authority_discovery.as_ref(), - <::Public>::ID => self.beefy.as_ref(), - _ => &[], - } - } -} - impl_opaque_keys! { pub struct SessionKeys { pub grandpa: Grandpa, @@ -462,18 +419,6 @@ impl_opaque_keys! { } } -// remove this when removing `OldSessionKeys` -fn transform_session_keys(_v: AccountId, old: OldSessionKeys) -> SessionKeys { - SessionKeys { - grandpa: old.grandpa, - babe: old.babe, - para_validator: old.para_validator, - para_assignment: old.para_assignment, - authority_discovery: old.authority_discovery, - beefy: old.beefy, - } -} - impl pallet_session::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; @@ -1192,6 +1137,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = MessageQueueHeapSize; type MaxStale = MessageQueueMaxStale; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; #[cfg(not(feature = "runtime-benchmarks"))] type MessageProcessor = MessageProcessor; #[cfg(feature = "runtime-benchmarks")] @@ -1306,7 +1252,7 @@ impl paras_registrar::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type OnSwap = (Crowdloan, Slots); + type OnSwap = (Crowdloan, Slots, SwapLeases); type ParaDeposit = ParaDeposit; type DataDepositPerByte = RegistrarDataDepositPerByte; type WeightInfo = weights::runtime_common_paras_registrar::WeightInfo; @@ -1418,130 +1364,211 @@ impl pallet_asset_rate::Config for Runtime { type BenchmarkHelper = runtime_common::impls::benchmarks::AssetRateArguments; } -construct_runtime! { - pub enum Runtime - { - // Basic stuff; balances is uncallable initially. - System: frame_system = 0, - - // Babe must be before session. 
- Babe: pallet_babe = 1, - - Timestamp: pallet_timestamp = 2, - Indices: pallet_indices = 3, - Balances: pallet_balances = 4, - TransactionPayment: pallet_transaction_payment = 26, - - // Consensus support. - // Authorship must be before session in order to note author in the correct session and era. - Authorship: pallet_authorship = 5, - Staking: pallet_staking = 6, - Offences: pallet_offences = 7, - Historical: session_historical = 27, - - Session: pallet_session = 8, - Grandpa: pallet_grandpa = 10, - AuthorityDiscovery: pallet_authority_discovery = 12, - - // Utility module. - Utility: pallet_utility = 16, - - // Less simple identity module. - Identity: pallet_identity = 17, - - // Social recovery module. - Recovery: pallet_recovery = 18, - - // Vesting. Usable initially, but removed once all vesting is finished. - Vesting: pallet_vesting = 19, - - // System scheduler. - Scheduler: pallet_scheduler = 20, - - // Preimage registrar. - Preimage: pallet_preimage = 28, - - // Sudo. - Sudo: pallet_sudo = 21, - - // Proxy module. Late addition. - Proxy: pallet_proxy = 22, - - // Multisig module. Late addition. - Multisig: pallet_multisig = 23, - - // Election pallet. Only works with staking, but placed here to maintain indices. - ElectionProviderMultiPhase: pallet_election_provider_multi_phase = 24, - - // Provides a semi-sorted list of nominators for staking. - VoterList: pallet_bags_list:: = 25, - - // Nomination pools for staking. - NominationPools: pallet_nomination_pools = 29, - - // Fast unstake pallet: extension to staking. - FastUnstake: pallet_fast_unstake = 30, - - // OpenGov - ConvictionVoting: pallet_conviction_voting = 31, - Referenda: pallet_referenda = 32, - Origins: pallet_custom_origins = 35, - Whitelist: pallet_whitelist = 36, - - // Treasury - Treasury: pallet_treasury = 37, - - // Parachains pallets. Start indices at 40 to leave room. - ParachainsOrigin: parachains_origin = 41, - Configuration: parachains_configuration = 42, - ParasShared: parachains_shared = 43, - ParaInclusion: parachains_inclusion = 44, - ParaInherent: parachains_paras_inherent = 45, - ParaScheduler: parachains_scheduler = 46, - Paras: parachains_paras = 47, - Initializer: parachains_initializer = 48, - Dmp: parachains_dmp = 49, - // RIP Ump 50 - Hrmp: parachains_hrmp = 51, - ParaSessionInfo: parachains_session_info = 52, - ParasDisputes: parachains_disputes = 53, - ParasSlashing: parachains_slashing = 54, - OnDemandAssignmentProvider: parachains_assigner_on_demand = 56, - CoretimeAssignmentProvider: parachains_assigner_coretime = 57, - - // Parachain Onboarding Pallets. Start indices at 60 to leave room. - Registrar: paras_registrar = 60, - Slots: slots = 61, - ParasSudoWrapper: paras_sudo_wrapper = 62, - Auctions: auctions = 63, - Crowdloan: crowdloan = 64, - AssignedSlots: assigned_slots = 65, - Coretime: coretime = 66, - - // Pallet for sending XCM. - XcmPallet: pallet_xcm = 99, - - // Generalized message queue - MessageQueue: pallet_message_queue = 100, - - // Asset rate. - AssetRate: pallet_asset_rate = 101, - - // Root testing pallet. - RootTesting: pallet_root_testing = 102, - - // BEEFY Bridges support. - Beefy: pallet_beefy = 200, - // MMR leaf construction must be after session in order to have a leaf's next_auth_set - // refer to block. See issue polkadot-fellows/runtimes#160 for details. - Mmr: pallet_mmr = 201, - BeefyMmrLeaf: pallet_beefy_mmr = 202, - - // Pallet for migrating Identity to a parachain. To be removed post-migration. 
- IdentityMigrator: identity_migrator = 248, +// Notify `coretime` pallet when a lease swap occurs +pub struct SwapLeases; +impl OnSwap for SwapLeases { + fn on_swap(one: ParaId, other: ParaId) { + coretime::Pallet::::on_legacy_lease_swap(one, other); } } +#[frame_support::runtime(legacy_ordering)] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + // Basic stuff; balances is uncallable initially. + #[runtime::pallet_index(0)] + pub type System = frame_system; + + // Babe must be before session. + #[runtime::pallet_index(1)] + pub type Babe = pallet_babe; + + #[runtime::pallet_index(2)] + pub type Timestamp = pallet_timestamp; + #[runtime::pallet_index(3)] + pub type Indices = pallet_indices; + #[runtime::pallet_index(4)] + pub type Balances = pallet_balances; + #[runtime::pallet_index(26)] + pub type TransactionPayment = pallet_transaction_payment; + + // Consensus support. + // Authorship must be before session in order to note author in the correct session and era. + #[runtime::pallet_index(5)] + pub type Authorship = pallet_authorship; + #[runtime::pallet_index(6)] + pub type Staking = pallet_staking; + #[runtime::pallet_index(7)] + pub type Offences = pallet_offences; + #[runtime::pallet_index(27)] + pub type Historical = session_historical; + + #[runtime::pallet_index(8)] + pub type Session = pallet_session; + #[runtime::pallet_index(10)] + pub type Grandpa = pallet_grandpa; + #[runtime::pallet_index(12)] + pub type AuthorityDiscovery = pallet_authority_discovery; + + // Utility module. + #[runtime::pallet_index(16)] + pub type Utility = pallet_utility; + + // Less simple identity module. + #[runtime::pallet_index(17)] + pub type Identity = pallet_identity; + + // Social recovery module. + #[runtime::pallet_index(18)] + pub type Recovery = pallet_recovery; + + // Vesting. Usable initially, but removed once all vesting is finished. + #[runtime::pallet_index(19)] + pub type Vesting = pallet_vesting; + + // System scheduler. + #[runtime::pallet_index(20)] + pub type Scheduler = pallet_scheduler; + + // Preimage registrar. + #[runtime::pallet_index(28)] + pub type Preimage = pallet_preimage; + + // Sudo. + #[runtime::pallet_index(21)] + pub type Sudo = pallet_sudo; + + // Proxy module. Late addition. + #[runtime::pallet_index(22)] + pub type Proxy = pallet_proxy; + + // Multisig module. Late addition. + #[runtime::pallet_index(23)] + pub type Multisig = pallet_multisig; + + // Election pallet. Only works with staking, but placed here to maintain indices. + #[runtime::pallet_index(24)] + pub type ElectionProviderMultiPhase = pallet_election_provider_multi_phase; + + // Provides a semi-sorted list of nominators for staking. + #[runtime::pallet_index(25)] + pub type VoterList = pallet_bags_list; + + // Nomination pools for staking. + #[runtime::pallet_index(29)] + pub type NominationPools = pallet_nomination_pools; + + // Fast unstake pallet: extension to staking.
+ #[runtime::pallet_index(30)] + pub type FastUnstake = pallet_fast_unstake; + + // OpenGov + #[runtime::pallet_index(31)] + pub type ConvictionVoting = pallet_conviction_voting; + #[runtime::pallet_index(32)] + pub type Referenda = pallet_referenda; + #[runtime::pallet_index(35)] + pub type Origins = pallet_custom_origins; + #[runtime::pallet_index(36)] + pub type Whitelist = pallet_whitelist; + + // Treasury + #[runtime::pallet_index(37)] + pub type Treasury = pallet_treasury; + + // Parachains pallets. Start indices at 40 to leave room. + #[runtime::pallet_index(41)] + pub type ParachainsOrigin = parachains_origin; + #[runtime::pallet_index(42)] + pub type Configuration = parachains_configuration; + #[runtime::pallet_index(43)] + pub type ParasShared = parachains_shared; + #[runtime::pallet_index(44)] + pub type ParaInclusion = parachains_inclusion; + #[runtime::pallet_index(45)] + pub type ParaInherent = parachains_paras_inherent; + #[runtime::pallet_index(46)] + pub type ParaScheduler = parachains_scheduler; + #[runtime::pallet_index(47)] + pub type Paras = parachains_paras; + #[runtime::pallet_index(48)] + pub type Initializer = parachains_initializer; + #[runtime::pallet_index(49)] + pub type Dmp = parachains_dmp; + // RIP Ump 50 + #[runtime::pallet_index(51)] + pub type Hrmp = parachains_hrmp; + #[runtime::pallet_index(52)] + pub type ParaSessionInfo = parachains_session_info; + #[runtime::pallet_index(53)] + pub type ParasDisputes = parachains_disputes; + #[runtime::pallet_index(54)] + pub type ParasSlashing = parachains_slashing; + #[runtime::pallet_index(56)] + pub type OnDemandAssignmentProvider = parachains_assigner_on_demand; + #[runtime::pallet_index(57)] + pub type CoretimeAssignmentProvider = parachains_assigner_coretime; + + // Parachain Onboarding Pallets. Start indices at 60 to leave room. + #[runtime::pallet_index(60)] + pub type Registrar = paras_registrar; + #[runtime::pallet_index(61)] + pub type Slots = slots; + #[runtime::pallet_index(62)] + pub type ParasSudoWrapper = paras_sudo_wrapper; + #[runtime::pallet_index(63)] + pub type Auctions = auctions; + #[runtime::pallet_index(64)] + pub type Crowdloan = crowdloan; + #[runtime::pallet_index(65)] + pub type AssignedSlots = assigned_slots; + #[runtime::pallet_index(66)] + pub type Coretime = coretime; + + // Pallet for sending XCM. + #[runtime::pallet_index(99)] + pub type XcmPallet = pallet_xcm; + + // Generalized message queue + #[runtime::pallet_index(100)] + pub type MessageQueue = pallet_message_queue; + + // Asset rate. + #[runtime::pallet_index(101)] + pub type AssetRate = pallet_asset_rate; + + // Root testing pallet. + #[runtime::pallet_index(102)] + pub type RootTesting = pallet_root_testing; + + // BEEFY Bridges support. + #[runtime::pallet_index(200)] + pub type Beefy = pallet_beefy; + // MMR leaf construction must be after session in order to have a leaf's next_auth_set + // refer to block. See issue polkadot-fellows/runtimes#160 for details. + #[runtime::pallet_index(201)] + pub type Mmr = pallet_mmr; + #[runtime::pallet_index(202)] + pub type BeefyMmrLeaf = pallet_beefy_mmr; + + // Pallet for migrating Identity to a parachain. To be removed post-migration. + #[runtime::pallet_index(248)] + pub type IdentityMigrator = identity_migrator; +} + /// The address format for describing accounts. pub type Address = sp_runtime::MultiAddress; /// Block header type as expected by this runtime. 
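The hunks above replace Westend's `construct_runtime!` invocation with the newer `#[frame_support::runtime]` attribute macro, keeping every pallet at its existing index via explicit `#[runtime::pallet_index(N)]` attributes. A minimal sketch of the pattern, reduced to two pallets (illustrative only, not part of this diff; a compiling runtime additionally needs the `Config` impls for each listed pallet):

```rust
// Sketch of the `#[frame_support::runtime]` declaration style adopted above.
// `legacy_ordering` preserves the ordering semantics of the old positional
// `Name: pallet = N` syntax while the declarations are migrated.
#[frame_support::runtime(legacy_ordering)]
mod runtime {
    // The runtime type itself, plus the outer enums the macro derives for it.
    #[runtime::runtime]
    #[runtime::derive(
        RuntimeCall,
        RuntimeEvent,
        RuntimeError,
        RuntimeOrigin,
        RuntimeFreezeReason,
        RuntimeHoldReason,
        RuntimeSlashReason,
        RuntimeLockId,
        RuntimeTask
    )]
    pub struct Runtime;

    // Indices are now explicit per-pallet attributes, so reordering the
    // declarations cannot silently renumber calls and events on chain.
    #[runtime::pallet_index(0)]
    pub type System = frame_system;

    #[runtime::pallet_index(4)]
    pub type Balances = pallet_balances;
}
```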
@@ -1581,8 +1608,6 @@ pub type Migrations = migrations::Unreleased; #[allow(deprecated, missing_docs)] pub mod migrations { use super::*; - #[cfg(feature = "try-runtime")] - use sp_core::crypto::ByteArray; pub struct GetLegacyLeaseImpl; impl coretime::migration::GetLegacyLease for GetLegacyLeaseImpl { @@ -1590,11 +1615,11 @@ pub mod migrations { let now = frame_system::Pallet::::block_number(); let lease = slots::Pallet::::lease(para); if lease.is_empty() { - return None + return None; } // Lease not yet started, ignore: if lease.iter().any(Option::is_none) { - return None + return None; } let (index, _) = as Leaser>::lease_period_index(now)?; @@ -1602,81 +1627,6 @@ pub mod migrations { } } - parameter_types! { - pub const ImOnlinePalletName: &'static str = "ImOnline"; - } - - /// Upgrade Session keys to exclude `ImOnline` key. - /// When this is removed, should also remove `OldSessionKeys`. - pub struct UpgradeSessionKeys; - const UPGRADE_SESSION_KEYS_FROM_SPEC: u32 = 104000; - - impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!(target: "runtime::session_keys", "Skipping session keys migration pre-upgrade check due to spec version (already applied?)"); - return Ok(Vec::new()) - } - - log::info!(target: "runtime::session_keys", "Collecting pre-upgrade session keys state"); - let key_ids = SessionKeys::key_ids(); - frame_support::ensure!( - key_ids.into_iter().find(|&k| *k == sp_core::crypto::key_types::IM_ONLINE) == None, - "New session keys contain the ImOnline key that should have been removed", - ); - let storage_key = pallet_session::QueuedKeys::::hashed_key(); - let mut state: Vec = Vec::new(); - frame_support::storage::unhashed::get::>( - &storage_key, - ) - .ok_or::("Queued keys are not available".into())? - .into_iter() - .for_each(|(id, keys)| { - state.extend_from_slice(id.as_slice()); - for key_id in key_ids { - state.extend_from_slice(keys.get_raw(*key_id)); - } - }); - frame_support::ensure!(state.len() > 0, "Queued keys are not empty before upgrade"); - Ok(state) - } - - fn on_runtime_upgrade() -> Weight { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!("Skipping session keys upgrade: already applied"); - return ::DbWeight::get().reads(1) - } - log::info!("Upgrading session keys"); - Session::upgrade_keys::(transform_session_keys); - Perbill::from_percent(50) * BlockWeights::get().max_block - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(old_state: alloc::vec::Vec) -> Result<(), sp_runtime::TryRuntimeError> { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!(target: "runtime::session_keys", "Skipping session keys migration post-upgrade check due to spec version (already applied?)"); - return Ok(()) - } - - let key_ids = SessionKeys::key_ids(); - let mut new_state: Vec = Vec::new(); - pallet_session::QueuedKeys::::get().into_iter().for_each(|(id, keys)| { - new_state.extend_from_slice(id.as_slice()); - for key_id in key_ids { - new_state.extend_from_slice(keys.get_raw(*key_id)); - } - }); - frame_support::ensure!(new_state.len() > 0, "Queued keys are not empty after upgrade"); - frame_support::ensure!( - old_state == new_state, - "Pre-upgrade and post-upgrade keys do not match!" 
- ); - log::info!(target: "runtime::session_keys", "Session keys migrated successfully"); - Ok(()) - } - } - // We don't have a limit in the Relay Chain. const IDENTITY_MIGRATION_KEY_LIMIT: u64 = u64::MAX; @@ -1693,11 +1643,6 @@ pub mod migrations { pallet_grandpa::migrations::MigrateV4ToV5, parachains_configuration::migration::v10::MigrateToV10, pallet_nomination_pools::migration::unversioned::TotalValueLockedSync, - UpgradeSessionKeys, - frame_support::migrations::RemovePallet< - ImOnlinePalletName, - ::DbWeight, - >, // Migrate Identity pallet for Usernames pallet_identity::migration::versioned::V0ToV1, parachains_configuration::migration::v11::MigrateToV11, @@ -1710,6 +1655,7 @@ pub mod migrations { crate::xcm_config::XcmRouter, GetLegacyLeaseImpl, >, + parachains_inclusion::migration::MigrateToV1, ); } @@ -1848,12 +1794,6 @@ sp_api::impl_runtime_apis! { impl offchain_primitives::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { - use sp_runtime::{traits::Header, DigestItem}; - - if header.digest().logs().iter().any(|di| di == &DigestItem::RuntimeEnvironmentUpdated) { - pallet_im_online::migration::clear_offchain_storage(Session::validators().len() as u32); - } - Executive::offchain_worker(header) } } @@ -2003,21 +1943,21 @@ sp_api::impl_runtime_apis! { } fn approval_voting_params() -> ApprovalVotingParams { - parachains_staging_runtime_api_impl::approval_voting_params::() + parachains_runtime_api_impl::approval_voting_params::() } fn disabled_validators() -> Vec { - parachains_staging_runtime_api_impl::disabled_validators::() + parachains_runtime_api_impl::disabled_validators::() } fn node_features() -> NodeFeatures { - parachains_staging_runtime_api_impl::node_features::() + parachains_runtime_api_impl::node_features::() } } impl beefy_primitives::BeefyApi for Runtime { fn beefy_genesis() -> Option { - Beefy::genesis_block() + pallet_beefy::GenesisBlock::::get() } fn validator_set() -> Option> { @@ -2054,11 +1994,11 @@ sp_api::impl_runtime_apis! { impl mmr::MmrApi for Runtime { fn mmr_root() -> Result { - Ok(Mmr::mmr_root()) + Ok(pallet_mmr::RootHash::::get()) } fn mmr_leaf_count() -> Result { - Ok(Mmr::mmr_leaves()) + Ok(pallet_mmr::NumberOfLeaves::::get()) } fn generate_proof( @@ -2252,6 +2192,37 @@ sp_api::impl_runtime_apis! 
{ } } + impl xcm_fee_payment_runtime_api::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + if !matches!(xcm_version, 3 | 4) { + return Err(XcmPaymentApiError::UnhandledXcmVersion); + } + Ok([VersionedAssetId::V4(xcm_config::TokenLocation::get().into())] + .into_iter() + .filter_map(|asset| asset.into_version(xcm_version).ok()) + .collect()) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + let local_asset = VersionedAssetId::V4(xcm_config::TokenLocation::get().into()); + let asset = asset + .into_version(4) + .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?; + + if asset != local_asset { return Err(XcmPaymentApiError::AssetNotFound); } + + Ok(WeightToFee::weight_to_fee(&weight)) + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + XcmPallet::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + XcmPallet::query_delivery_fees(destination, message) + } + } + impl pallet_nomination_pools_runtime_api::NominationPoolsApi< Block, AccountId, @@ -2570,7 +2541,7 @@ mod remote_tests { #[tokio::test] async fn run_migrations() { if var("RUN_MIGRATION_TESTS").is_err() { - return + return; } sp_tracing::try_init_simple(); diff --git a/polkadot/runtime/westend/src/tests.rs b/polkadot/runtime/westend/src/tests.rs index 9f9963160590..4acb81e963b2 100644 --- a/polkadot/runtime/westend/src/tests.rs +++ b/polkadot/runtime/westend/src/tests.rs @@ -21,7 +21,6 @@ use std::collections::HashSet; use crate::*; use frame_support::traits::WhitelistedStorageKeys; use sp_core::hexdisplay::HexDisplay; -use xcm::latest::prelude::*; #[test] fn remove_keys_weight_is_sensible() { @@ -55,11 +54,12 @@ fn sanity_check_teleport_assets_weight() { // Usually when XCM runs into an issue, it will return a weight of `Weight::MAX`, // so this test will certainly ensure that this problem does not occur. use frame_support::dispatch::GetDispatchInfo; - let weight = pallet_xcm::Call::::teleport_assets { + let weight = pallet_xcm::Call::::limited_teleport_assets { dest: Box::new(Here.into()), beneficiary: Box::new(Here.into()), assets: Box::new((Here, 200_000).into()), fee_asset_item: 0, + weight_limit: Unlimited, } .get_dispatch_info() .weight; diff --git a/polkadot/runtime/westend/src/weights/pallet_staking.rs b/polkadot/runtime/westend/src/weights/pallet_staking.rs index 7a641e36a126..393fa0b37176 100644 --- a/polkadot/runtime/westend/src/weights/pallet_staking.rs +++ b/polkadot/runtime/westend/src/weights/pallet_staking.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_staking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-01-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-8idpd4bs-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -50,22 +50,22 @@ pub struct WeightInfo(PhantomData); impl pallet_staking::WeightInfo for WeightInfo { /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:0 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `894` + // Measured: `1009` // Estimated: `4764` - // Minimum execution time: 37_340_000 picoseconds. - Weight::from_parts(38_930_000, 0) + // Minimum execution time: 40_585_000 picoseconds. + Weight::from_parts(41_800_000, 0) .saturating_add(Weight::from_parts(0, 4764)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Staking::Bonded` (r:1 w:0) @@ -84,22 +84,22 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1921` // Estimated: `8877` - // Minimum execution time: 80_630_000 picoseconds. - Weight::from_parts(82_196_000, 0) + // Minimum execution time: 81_809_000 picoseconds. + Weight::from_parts(84_387_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -112,43 +112,45 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2128` // Estimated: `8877` - // Minimum execution time: 83_523_000 picoseconds. - Weight::from_parts(86_639_000, 0) + // Minimum execution time: 89_419_000 picoseconds. 
+ Weight::from_parts(91_237_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1075` + // Measured: `1223` // Estimated: `4764` - // Minimum execution time: 38_636_000 picoseconds. - Weight::from_parts(40_399_283, 0) + // Minimum execution time: 45_152_000 picoseconds. + Weight::from_parts(46_460_819, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 869 - .saturating_add(Weight::from_parts(37_752, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Standard Error: 972 + .saturating_add(Weight::from_parts(55_473, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Bonded` (r:1 w:1) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -174,11 +176,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 81_301_000 picoseconds. - Weight::from_parts(88_609_205, 0) + // Minimum execution time: 82_762_000 picoseconds. 
+ Weight::from_parts(91_035_077, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 3_388 - .saturating_add(Weight::from_parts(1_253_692, 0).saturating_mul(s.into())) + // Standard Error: 3_771 + .saturating_add(Weight::from_parts(1_217_871, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(11)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -186,6 +188,8 @@ impl pallet_staking::WeightInfo for WeightInfo { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinValidatorBond` (r:1 w:0) /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MinCommission` (r:1 w:0) @@ -196,8 +200,6 @@ impl pallet_staking::WeightInfo for WeightInfo { /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:1 w:1) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -210,33 +212,37 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1301` // Estimated: `4556` - // Minimum execution time: 47_292_000 picoseconds. - Weight::from_parts(48_566_000, 0) + // Minimum execution time: 50_555_000 picoseconds. + Weight::from_parts(52_052_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:128 w:128) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1243 + k * (569 ±0)` + // Measured: `1778 + k * (572 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 28_840_000 picoseconds. - Weight::from_parts(27_510_817, 0) + // Minimum execution time: 35_037_000 picoseconds. 
+ Weight::from_parts(35_081_878, 0) .saturating_add(Weight::from_parts(0, 4556)) - // Standard Error: 6_603 - .saturating_add(Weight::from_parts(6_268_853, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(1)) + // Standard Error: 5_473 + .saturating_add(Weight::from_parts(6_667_924, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -247,8 +253,6 @@ impl pallet_staking::WeightInfo for WeightInfo { /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:2 w:2) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -262,11 +266,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1797 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 57_537_000 picoseconds. - Weight::from_parts(55_854_233, 0) + // Minimum execution time: 62_098_000 picoseconds. + Weight::from_parts(60_154_061, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 14_427 - .saturating_add(Weight::from_parts(3_844_957, 0).saturating_mul(n.into())) + // Standard Error: 19_257 + .saturating_add(Weight::from_parts(3_839_855, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6)) @@ -274,6 +278,8 @@ impl pallet_staking::WeightInfo for WeightInfo { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -288,12 +294,12 @@ impl pallet_staking::WeightInfo for WeightInfo { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1581` + // Measured: `1747` // Estimated: `6248` - // Minimum execution time: 49_997_000 picoseconds. 
- Weight::from_parts(51_266_000, 0) + // Minimum execution time: 54_993_000 picoseconds. + Weight::from_parts(56_698_000, 0) .saturating_add(Weight::from_parts(0, 6248)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(6)) } /// Storage: `Staking::Ledger` (r:1 w:0) @@ -306,40 +312,40 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `865` // Estimated: `4556` - // Minimum execution time: 15_342_000 picoseconds. - Weight::from_parts(15_970_000, 0) + // Minimum execution time: 18_100_000 picoseconds. + Weight::from_parts(18_547_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:1 w:1) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn update_payee() -> Weight { // Proof Size summary in bytes: // Measured: `932` // Estimated: `4556` - // Minimum execution time: 20_719_000 picoseconds. - Weight::from_parts(21_373_000, 0) + // Minimum execution time: 23_428_000 picoseconds. + Weight::from_parts(24_080_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:2) + /// Storage: `Staking::Ledger` (r:2 w:2) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_controller() -> Weight { // Proof Size summary in bytes: // Measured: `865` - // Estimated: `4556` - // Minimum execution time: 18_237_000 picoseconds. - Weight::from_parts(18_896_000, 0) - .saturating_add(Weight::from_parts(0, 4556)) - .saturating_add(T::DbWeight::get().reads(2)) + // Estimated: `8122` + // Minimum execution time: 21_159_000 picoseconds. + Weight::from_parts(21_706_000, 0) + .saturating_add(Weight::from_parts(0, 8122)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Staking::ValidatorCount` (r:0 w:1) @@ -348,8 +354,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_946_000 picoseconds. - Weight::from_parts(2_131_000, 0) + // Minimum execution time: 1_910_000 picoseconds. + Weight::from_parts(2_003_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -359,8 +365,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_840_000 picoseconds. - Weight::from_parts(7_208_000, 0) + // Minimum execution time: 7_076_000 picoseconds. 
+ Weight::from_parts(7_349_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -370,8 +376,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_812_000 picoseconds. - Weight::from_parts(7_254_000, 0) + // Minimum execution time: 7_067_000 picoseconds. + Weight::from_parts(7_389_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -381,8 +387,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_787_000 picoseconds. - Weight::from_parts(7_206_000, 0) + // Minimum execution time: 7_148_000 picoseconds. + Weight::from_parts(7_446_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -393,32 +399,32 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_045_000 picoseconds. - Weight::from_parts(2_281_841, 0) + // Minimum execution time: 2_025_000 picoseconds. + Weight::from_parts(2_229_953, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 70 - .saturating_add(Weight::from_parts(11_592, 0).saturating_mul(v.into())) + // Standard Error: 67 + .saturating_add(Weight::from_parts(11_785, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Staking::Ledger` (r:751 w:1502) + /// Storage: `Staking::Ledger` (r:1502 w:1502) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:751 w:751) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:751 w:0) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:0 w:751) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 751]`. fn deprecate_controller_batch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `668 + i * (148 ±0)` - // Estimated: `990 + i * (3566 ±0)` - // Minimum execution time: 1_657_000 picoseconds. - Weight::from_parts(1_702_000, 0) + // Measured: `680 + i * (227 ±0)` + // Estimated: `990 + i * (7132 ±0)` + // Minimum execution time: 4_321_000 picoseconds. 
+ Weight::from_parts(4_407_000, 0) .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 20_041 - .saturating_add(Weight::from_parts(13_165_254, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into()))) + // Standard Error: 37_239 + .saturating_add(Weight::from_parts(21_300_598, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(0, 7132).saturating_mul(i.into())) } /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -453,11 +459,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 78_774_000 picoseconds. - Weight::from_parts(85_770_713, 0) + // Minimum execution time: 78_908_000 picoseconds. + Weight::from_parts(84_886_373, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 2_815 - .saturating_add(Weight::from_parts(1_244_494, 0).saturating_mul(s.into())) + // Standard Error: 3_376 + .saturating_add(Weight::from_parts(1_217_850, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(12)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -470,11 +476,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `66639` // Estimated: `70104` - // Minimum execution time: 129_905_000 picoseconds. - Weight::from_parts(932_195_554, 0) + // Minimum execution time: 136_389_000 picoseconds. + Weight::from_parts(1_207_241_524, 0) .saturating_add(Weight::from_parts(0, 70104)) - // Standard Error: 57_492 - .saturating_add(Weight::from_parts(4_826_754, 0).saturating_mul(s.into())) + // Standard Error: 77_138 + .saturating_add(Weight::from_parts(6_443_948, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -511,11 +517,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `8249 + n * (396 ±0)` // Estimated: `10779 + n * (3774 ±0)` - // Minimum execution time: 127_094_000 picoseconds. - Weight::from_parts(160_088_053, 0) + // Minimum execution time: 130_222_000 picoseconds. + Weight::from_parts(167_236_150, 0) .saturating_add(Weight::from_parts(0, 10779)) - // Standard Error: 32_978 - .saturating_add(Weight::from_parts(39_845_710, 0).saturating_mul(n.into())) + // Standard Error: 34_051 + .saturating_add(Weight::from_parts(39_899_917, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(14)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4)) @@ -539,11 +545,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1922 + l * (5 ±0)` // Estimated: `8877` - // Minimum execution time: 75_672_000 picoseconds. - Weight::from_parts(78_708_335, 0) + // Minimum execution time: 79_136_000 picoseconds. 
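The regression constants above compose mechanically into a total weight: a base ref-time, a per-component slope, and `T::DbWeight` read/write terms added per item. A minimal, runnable sketch of how the new `deprecate_controller_batch(i)` curve evaluates, assuming the default `frame_support` `RocksDbWeight` constants (25_000_000 ps per read, 100_000_000 ps per write) as a stand-in for the runtime's actual configured DB weights:

// Illustrative sketch only: the ref-time component of
// `deprecate_controller_batch(i)` from the hunk above. The DB costs are the
// frame_support `RocksDbWeight` defaults, assumed here; a concrete runtime
// may configure its own benchmarked values.
const DB_READ_PS: u64 = 25_000_000; // assumed per-read cost, picoseconds
const DB_WRITE_PS: u64 = 100_000_000; // assumed per-write cost, picoseconds

fn deprecate_controller_batch_ref_time(i: u64) -> u64 {
    21_300_598u64
        .saturating_mul(i) // per-controller slope from the regression
        .saturating_add(4_407_000) // base ref-time
        .saturating_add(DB_READ_PS.saturating_mul(4).saturating_mul(i)) // 4 reads per item
        .saturating_add(DB_WRITE_PS.saturating_mul(3).saturating_mul(i)) // 3 writes per item
}

fn main() {
    // At the worst case i = 751 controllers this comes to roughly
    // 3.16e11 ps, i.e. about 0.32 s of ref-time, dominated by the writes.
    println!("{} ps", deprecate_controller_batch_ref_time(751));
}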
+ Weight::from_parts(82_129_497, 0) .saturating_add(Weight::from_parts(0, 8877)) - // Standard Error: 3_387 - .saturating_add(Weight::from_parts(37_084, 0).saturating_mul(l.into())) + // Standard Error: 3_867 + .saturating_add(Weight::from_parts(75_156, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -578,11 +584,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 87_991_000 picoseconds. - Weight::from_parts(90_272_005, 0) + // Minimum execution time: 89_375_000 picoseconds. + Weight::from_parts(91_224_907, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 2_815 - .saturating_add(Weight::from_parts(1_232_322, 0).saturating_mul(s.into())) + // Standard Error: 3_424 + .saturating_add(Weight::from_parts(1_219_542, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(11)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -627,14 +633,14 @@ impl pallet_staking::WeightInfo for WeightInfo { fn new_era(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (716 ±0) + v * (3594 ±0)` - // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 528_862_000 picoseconds. - Weight::from_parts(534_620_000, 0) + // Estimated: `456136 + n * (3566 ±4) + v * (3566 ±0)` + // Minimum execution time: 520_905_000 picoseconds. + Weight::from_parts(523_771_000, 0) .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 2_005_553 - .saturating_add(Weight::from_parts(65_586_008, 0).saturating_mul(v.into())) - // Standard Error: 199_842 - .saturating_add(Weight::from_parts(18_155_389, 0).saturating_mul(n.into())) + // Standard Error: 2_142_714 + .saturating_add(Weight::from_parts(68_631_588, 0).saturating_mul(v.into())) + // Standard Error: 213_509 + .saturating_add(Weight::from_parts(19_343_025, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(184)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -665,13 +671,13 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `3108 + n * (907 ±0) + v * (391 ±0)` // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 33_532_110_000 picoseconds. - Weight::from_parts(33_926_321_000, 0) + // Minimum execution time: 36_848_619_000 picoseconds. 
+ Weight::from_parts(37_362_442_000, 0) .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 374_134 - .saturating_add(Weight::from_parts(4_627_629, 0).saturating_mul(v.into())) - // Standard Error: 374_134 - .saturating_add(Weight::from_parts(4_068_168, 0).saturating_mul(n.into())) + // Standard Error: 415_031 + .saturating_add(Weight::from_parts(5_204_987, 0).saturating_mul(v.into())) + // Standard Error: 415_031 + .saturating_add(Weight::from_parts(4_132_636, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(179)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -688,11 +694,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `946 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_395_956_000 picoseconds. - Weight::from_parts(88_416_870, 0) + // Minimum execution time: 2_512_817_000 picoseconds. + Weight::from_parts(119_401_374, 0) .saturating_add(Weight::from_parts(0, 3510)) - // Standard Error: 8_731 - .saturating_add(Weight::from_parts(4_750_956, 0).saturating_mul(v.into())) + // Standard Error: 8_463 + .saturating_add(Weight::from_parts(4_860_364, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -715,8 +721,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_761_000 picoseconds. - Weight::from_parts(4_013_000, 0) + // Minimum execution time: 3_686_000 picoseconds. + Weight::from_parts(3_881_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -738,8 +744,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_325_000 picoseconds. - Weight::from_parts(3_519_000, 0) + // Minimum execution time: 3_143_000 picoseconds. + Weight::from_parts(3_424_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -769,8 +775,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1870` // Estimated: `6248` - // Minimum execution time: 63_583_000 picoseconds. - Weight::from_parts(65_917_000, 0) + // Minimum execution time: 66_946_000 picoseconds. + Weight::from_parts(69_382_000, 0) .saturating_add(Weight::from_parts(0, 6248)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(6)) @@ -783,8 +789,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `658` // Estimated: `3510` - // Minimum execution time: 10_975_000 picoseconds. - Weight::from_parts(11_328_000, 0) + // Minimum execution time: 11_278_000 picoseconds. + Weight::from_parts(11_603_000, 0) .saturating_add(Weight::from_parts(0, 3510)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -795,9 +801,29 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_954_000 picoseconds. - Weight::from_parts(2_081_000, 0) + // Minimum execution time: 1_963_000 picoseconds. 
+ Weight::from_parts(2_077_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + fn restore_ledger() -> Weight { + // Proof Size summary in bytes: + // Measured: `1014` + // Estimated: `4764` + // Minimum execution time: 40_258_000 picoseconds. + Weight::from_parts(41_210_000, 0) + .saturating_add(Weight::from_parts(0, 4764)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } } diff --git a/polkadot/runtime/westend/src/weights/pallet_xcm.rs b/polkadot/runtime/westend/src/weights/pallet_xcm.rs index 10725cecf249..80bc551ba1e2 100644 --- a/polkadot/runtime/westend/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -60,8 +60,26 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 25_725_000 picoseconds. - Weight::from_parts(26_174_000, 0) + // Minimum execution time: 24_535_000 picoseconds. + Weight::from_parts(25_618_000, 0) + .saturating_add(Weight::from_parts(0, 3612)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 25_376_000 picoseconds. 
+ Weight::from_parts(26_180_000, 0) .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -80,8 +98,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `250` // Estimated: `6196` - // Minimum execution time: 113_140_000 picoseconds. - Weight::from_parts(116_204_000, 0) + // Minimum execution time: 108_786_000 picoseconds. + Weight::from_parts(112_208_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -100,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `302` // Estimated: `6196` - // Minimum execution time: 108_571_000 picoseconds. - Weight::from_parts(110_650_000, 0) + // Minimum execution time: 105_190_000 picoseconds. + Weight::from_parts(107_140_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -120,8 +138,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `250` // Estimated: `6196` - // Minimum execution time: 111_836_000 picoseconds. - Weight::from_parts(114_435_000, 0) + // Minimum execution time: 109_027_000 picoseconds. + Weight::from_parts(111_404_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -136,14 +154,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_160_000 picoseconds. - Weight::from_parts(7_477_000, 0) + // Minimum execution time: 6_668_000 picoseconds. + Weight::from_parts(7_013_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -151,8 +179,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_934_000 picoseconds. - Weight::from_parts(2_053_000, 0) + // Minimum execution time: 1_740_000 picoseconds. + Weight::from_parts(1_884_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -173,8 +201,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 31_123_000 picoseconds. - Weight::from_parts(31_798_000, 0) + // Minimum execution time: 30_200_000 picoseconds. 
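The `18_446_744_073_709_551_000` picosecond figures under `Benchmark::Override` (for `execute_blob` here, as for `execute` just above it) are not real measurements: the generated file rounds regression output down to whole thousands, and this value appears to be exactly u64::MAX with the last three digits dropped. The `Benchmark::Override` storage line marks the result as a placeholder the runtime is expected to override manually rather than use as-is. A one-line check of that reading:

fn main() {
    // u64::MAX is 18_446_744_073_709_551_615; truncating to whole thousands
    // yields the sentinel value written into the generated weight file.
    assert_eq!(u64::MAX / 1_000 * 1_000, 18_446_744_073_709_551_000);
}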
+ Weight::from_parts(30_768_000, 0) .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -195,8 +223,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `327` // Estimated: `3792` - // Minimum execution time: 35_175_000 picoseconds. - Weight::from_parts(36_098_000, 0) + // Minimum execution time: 33_928_000 picoseconds. + Weight::from_parts(35_551_000, 0) .saturating_add(Weight::from_parts(0, 3792)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -207,8 +235,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_974_000 picoseconds. - Weight::from_parts(2_096_000, 0) + // Minimum execution time: 1_759_000 picoseconds. + Weight::from_parts(1_880_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -218,8 +246,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `13387` - // Minimum execution time: 16_626_000 picoseconds. - Weight::from_parts(17_170_000, 0) + // Minimum execution time: 16_507_000 picoseconds. + Weight::from_parts(17_219_000, 0) .saturating_add(Weight::from_parts(0, 13387)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -230,8 +258,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `26` // Estimated: `13391` - // Minimum execution time: 16_937_000 picoseconds. - Weight::from_parts(17_447_000, 0) + // Minimum execution time: 16_633_000 picoseconds. + Weight::from_parts(16_889_000, 0) .saturating_add(Weight::from_parts(0, 13391)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -242,8 +270,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `40` // Estimated: `15880` - // Minimum execution time: 19_157_000 picoseconds. - Weight::from_parts(19_659_000, 0) + // Minimum execution time: 19_297_000 picoseconds. + Weight::from_parts(19_820_000, 0) .saturating_add(Weight::from_parts(0, 15880)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -261,8 +289,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `183` // Estimated: `6123` - // Minimum execution time: 30_699_000 picoseconds. - Weight::from_parts(31_537_000, 0) + // Minimum execution time: 30_364_000 picoseconds. + Weight::from_parts(31_122_000, 0) .saturating_add(Weight::from_parts(0, 6123)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -273,8 +301,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `69` // Estimated: `10959` - // Minimum execution time: 12_303_000 picoseconds. - Weight::from_parts(12_670_000, 0) + // Minimum execution time: 11_997_000 picoseconds. + Weight::from_parts(12_392_000, 0) .saturating_add(Weight::from_parts(0, 10959)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -284,8 +312,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `33` // Estimated: `13398` - // Minimum execution time: 17_129_000 picoseconds. - Weight::from_parts(17_668_000, 0) + // Minimum execution time: 16_894_000 picoseconds. 
+ Weight::from_parts(17_452_000, 0) .saturating_add(Weight::from_parts(0, 13398)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -304,8 +332,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `183` // Estimated: `13548` - // Minimum execution time: 39_960_000 picoseconds. - Weight::from_parts(41_068_000, 0) + // Minimum execution time: 39_864_000 picoseconds. + Weight::from_parts(40_859_000, 0) .saturating_add(Weight::from_parts(0, 13548)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) @@ -318,8 +346,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_333_000 picoseconds. - Weight::from_parts(2_504_000, 0) + // Minimum execution time: 2_363_000 picoseconds. + Weight::from_parts(2_519_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -330,8 +358,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 22_932_000 picoseconds. - Weight::from_parts(23_307_000, 0) + // Minimum execution time: 22_409_000 picoseconds. + Weight::from_parts(22_776_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -342,8 +370,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `23` // Estimated: `3488` - // Minimum execution time: 34_558_000 picoseconds. - Weight::from_parts(35_299_000, 0) + // Minimum execution time: 33_551_000 picoseconds. + Weight::from_parts(34_127_000, 0) .saturating_add(Weight::from_parts(0, 3488)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs index ac0f05301b48..acd1834f79ed 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::assigner_on_demand` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot @@ -31,11 +31,11 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::assigner_on_demand -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --chain=westend-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,44 +48,44 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::assigner_on_demand`. pub struct WeightInfo(PhantomData); impl runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo { - /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0) - /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Paras::ParaLifecycles` (r:1 w:0) - /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1) - /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 9999]`. fn place_order_keep_alive(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `297 + s * (4 ±0)` - // Estimated: `3762 + s * (4 ±0)` - // Minimum execution time: 33_522_000 picoseconds. - Weight::from_parts(35_436_835, 0) - .saturating_add(Weight::from_parts(0, 3762)) - // Standard Error: 129 - .saturating_add(Weight::from_parts(14_041, 0).saturating_mul(s.into())) + // Measured: `218 + s * (8 ±0)` + // Estimated: `3681 + s * (8 ±0)` + // Minimum execution time: 21_396_000 picoseconds. 
+ Weight::from_parts(20_585_695, 0) + .saturating_add(Weight::from_parts(0, 3681)) + // Standard Error: 127 + .saturating_add(Weight::from_parts(20_951, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } - /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0) - /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Paras::ParaLifecycles` (r:1 w:0) - /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1) - /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 9999]`. fn place_order_allow_death(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `297 + s * (4 ±0)` - // Estimated: `3762 + s * (4 ±0)` - // Minimum execution time: 33_488_000 picoseconds. - Weight::from_parts(34_848_934, 0) - .saturating_add(Weight::from_parts(0, 3762)) - // Standard Error: 143 - .saturating_add(Weight::from_parts(14_215, 0).saturating_mul(s.into())) + // Measured: `218 + s * (8 ±0)` + // Estimated: `3681 + s * (8 ±0)` + // Minimum execution time: 21_412_000 picoseconds. + Weight::from_parts(19_731_554, 0) + .saturating_add(Weight::from_parts(0, 3681)) + // Standard Error: 128 + .saturating_add(Weight::from_parts(21_055, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } } diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs index 0dd64f054d00..aa99ac9438c4 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::paras_inherent` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
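The on-demand assigner rework visible above replaces the single `OnDemandQueue` read-modify-write with `QueueStatus`/`FreeEntries`, which shows up in the weights as one extra write (w:2), a steeper per-entry slope, but a much lower base time. A sketch evaluating the new `place_order_keep_alive(s)` curve at the top of the component range, with the same assumed default `RocksDbWeight` constants as in the staking sketch earlier:

// Sketch only: ref-time and proof-size of the new `place_order_keep_alive(s)`
// at s = 9_999, the maximum queue length benchmarked. DB constants are the
// frame_support defaults, assumed rather than taken from the runtime.
const DB_READ_PS: u64 = 25_000_000;
const DB_WRITE_PS: u64 = 100_000_000;

fn place_order_keep_alive(s: u64) -> (u64, u64) {
    let ref_time = 20_585_695u64
        .saturating_add(20_951u64.saturating_mul(s)) // per-entry slope
        .saturating_add(DB_READ_PS.saturating_mul(3)) // 3 fixed reads
        .saturating_add(DB_WRITE_PS.saturating_mul(2)); // 2 fixed writes
    let proof_size = 3_681u64.saturating_add(8u64.saturating_mul(s)); // bytes
    (ref_time, proof_size)
}

fn main() {
    let (t, p) = place_order_keep_alive(9_999);
    // Roughly 0.5 ms of ref-time and ~82 KiB of proof at a full queue.
    println!("{t} ps, {p} bytes of proof");
}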
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot @@ -29,14 +29,13 @@ // --steps=50 // --repeat=20 // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::paras_inherent // --chain=westend-dev -// --header=./file_header.txt -// --output=./runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,297 +48,311 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::paras_inherent`. pub struct WeightInfo(PhantomData); impl runtime_parachains::paras_inherent::WeightInfo for WeightInfo { - /// Storage: ParaInherent Included (r:1 w:1) - /// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler AvailabilityCores (r:1 w:1) - /// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Babe AuthorVrfRandomness (r:1 w:0) - /// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) - /// Storage: ParaSessionInfo Sessions (r:1 w:0) - /// Proof Skipped: ParaSessionInfo Sessions (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Disputes (r:1 w:1) - /// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes BackersOnDisputes (r:1 w:1) - /// Proof Skipped: ParasDisputes BackersOnDisputes (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Included (r:1 w:1) - /// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaSessionInfo AccountKeys (r:1 w:0) - /// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking ActiveEra (r:1 w:0) - /// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen) - /// Storage: Staking ErasRewardPoints (r:1 w:1) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaInherent OnChainVotes (r:1 w:1) - /// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasDisputes Frozen (r:1 w:0) - /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) - 
/// Storage: ParaInclusion PendingAvailability (r:2 w:1) - /// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - /// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:1) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelDigests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaScheduler SessionStartBlock (r:1 w:0) - /// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ParathreadQueue (r:1 w:1) - /// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler Scheduled (r:1 w:1) - /// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ValidatorGroups (r:1 w:0) - /// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpWatermarks (r:0 w:1) - /// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaSessionInfo::Sessions` (r:1 w:0) + /// Proof: `ParaSessionInfo::Sessions` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:1) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `ParasDisputes::BackersOnDisputes` (r:1 w:1) + /// Proof: `ParasDisputes::BackersOnDisputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:1 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The 
range of component `v` is `[10, 200]`. fn enter_variable_disputes(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `50518` - // Estimated: `56458 + v * (23 ±0)` - // Minimum execution time: 998_338_000 picoseconds. - Weight::from_parts(468_412_001, 0) - .saturating_add(Weight::from_parts(0, 56458)) - // Standard Error: 20_559 - .saturating_add(Weight::from_parts(56_965_025, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(27)) - .saturating_add(T::DbWeight::get().writes(15)) + // Measured: `67518` + // Estimated: `73458 + v * (23 ±0)` + // Minimum execution time: 844_022_000 picoseconds. + Weight::from_parts(456_682_337, 0) + .saturating_add(Weight::from_parts(0, 73458)) + // Standard Error: 16_403 + .saturating_add(Weight::from_parts(41_871_245, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(28)) + .saturating_add(T::DbWeight::get().writes(16)) .saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into())) } - /// Storage: ParaInherent Included (r:1 w:1) - /// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler AvailabilityCores (r:1 w:1) - /// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Babe AuthorVrfRandomness (r:1 w:0) - /// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) - /// Storage: ParaInherent OnChainVotes (r:1 w:1) - /// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasDisputes Frozen (r:1 w:0) - /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailability (r:2 w:1) - /// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - /// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaSessionInfo AccountKeys (r:1 w:0) - /// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking ActiveEra (r:1 w:0) - /// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen) - /// Storage: Staking ErasRewardPoints (r:1 w:1) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:1) - /// Proof Skipped: Dmp DeliveryFeeFactor 
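`enter_variable_disputes` above is the heaviest entry in this file, so its regression line is worth reading alongside the reported minimum. Note that the "minimum execution time" is pure measured execution (presumably at the low end of the component range); the `T::DbWeight` terms are added on top of the regression. A sketch under the same assumed `RocksDbWeight` defaults:

// Sketch only: ref-time of `enter_variable_disputes(v)` from the new weights.
const DB_READ_PS: u64 = 25_000_000;
const DB_WRITE_PS: u64 = 100_000_000;

fn enter_variable_disputes(v: u64) -> u64 {
    456_682_337u64
        .saturating_add(41_871_245u64.saturating_mul(v)) // per-dispute slope
        .saturating_add(DB_READ_PS.saturating_mul(28)) // 28 fixed reads
        .saturating_add(DB_WRITE_PS.saturating_mul(16)) // 16 fixed writes
}

fn main() {
    // At the v = 200 ceiling the total comes to about 1.1e10 ps, i.e.
    // roughly 11 ms of ref-time for this inherent alone.
    println!("{} ps", enter_variable_disputes(200));
}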
(max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelDigests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Disputes (r:1 w:0) - /// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaScheduler SessionStartBlock (r:1 w:0) - /// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ParathreadQueue (r:1 w:1) - /// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler Scheduled (r:1 w:1) - /// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ValidatorGroups (r:1 w:0) - /// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion AvailabilityBitfields (r:0 w:1) - /// Proof Skipped: ParaInclusion AvailabilityBitfields (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Included (r:0 w:1) - /// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpWatermarks (r:0 w:1) - /// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, 
`max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_bitfields() -> Weight { // Proof Size summary in bytes: - // Measured: `42352` - // Estimated: `48292` - // Minimum execution time: 457_404_000 picoseconds. - Weight::from_parts(485_416_000, 0) - .saturating_add(Weight::from_parts(0, 48292)) - .saturating_add(T::DbWeight::get().reads(25)) + // Measured: `43196` + // Estimated: `49136` + // Minimum execution time: 438_637_000 picoseconds. 
+ Weight::from_parts(458_342_000, 0) + .saturating_add(Weight::from_parts(0, 49136)) + .saturating_add(T::DbWeight::get().reads(26)) .saturating_add(T::DbWeight::get().writes(16)) } - /// Storage: ParaInherent Included (r:1 w:1) - /// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler AvailabilityCores (r:1 w:1) - /// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Babe AuthorVrfRandomness (r:1 w:0) - /// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) - /// Storage: ParaInherent OnChainVotes (r:1 w:1) - /// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasDisputes Frozen (r:1 w:0) - /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailability (r:2 w:1) - /// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - /// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaSessionInfo AccountKeys (r:1 w:0) - /// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking ActiveEra (r:1 w:0) - /// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen) - /// Storage: Staking ErasRewardPoints (r:1 w:1) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:1) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelDigests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Disputes (r:1 w:0) - /// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaScheduler SessionStartBlock (r:1 w:0) - /// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ParathreadQueue (r:1 w:1) - /// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured) - /// 
Storage: ParaScheduler Scheduled (r:1 w:1) - /// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ValidatorGroups (r:1 w:0) - /// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:1 w:0) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: ParasDisputes Included (r:0 w:1) - /// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpWatermarks (r:0 w:1) - /// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) 
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `v` is `[101, 200]`. fn enter_backed_candidates_variable(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `42387` - // Estimated: `48327` - // Minimum execution time: 6_864_029_000 picoseconds. - Weight::from_parts(1_237_704_892, 0) - .saturating_add(Weight::from_parts(0, 48327)) - // Standard Error: 33_413 - .saturating_add(Weight::from_parts(56_199_819, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(28)) - .saturating_add(T::DbWeight::get().writes(15)) + // Measured: `43269` + // Estimated: `49209` + // Minimum execution time: 5_955_361_000 picoseconds. 
+ Weight::from_parts(1_285_398_956, 0) + .saturating_add(Weight::from_parts(0, 49209)) + // Standard Error: 57_369 + .saturating_add(Weight::from_parts(47_073_853, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(29)) + .saturating_add(T::DbWeight::get().writes(16)) } - /// Storage: ParaInherent Included (r:1 w:1) - /// Proof Skipped: ParaInherent Included (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler AvailabilityCores (r:1 w:1) - /// Proof Skipped: ParaScheduler AvailabilityCores (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Babe AuthorVrfRandomness (r:1 w:0) - /// Proof: Babe AuthorVrfRandomness (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) - /// Storage: ParaInherent OnChainVotes (r:1 w:1) - /// Proof Skipped: ParaInherent OnChainVotes (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasDisputes Frozen (r:1 w:0) - /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailability (r:2 w:1) - /// Proof Skipped: ParaInclusion PendingAvailability (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - /// Proof Skipped: ParaInclusion PendingAvailabilityCommitments (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaSessionInfo AccountKeys (r:1 w:0) - /// Proof Skipped: ParaSessionInfo AccountKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking ActiveEra (r:1 w:0) - /// Proof: Staking ActiveEra (max_values: Some(1), max_size: Some(13), added: 508, mode: MaxEncodedLen) - /// Storage: Staking ErasRewardPoints (r:1 w:1) - /// Proof Skipped: Staking ErasRewardPoints (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:1) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelDigests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpChannelDigests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasDisputes Disputes (r:1 w:0) - /// Proof Skipped: ParasDisputes Disputes (max_values: None, max_size: None, mode: Measured) - /// Storage: ParaScheduler SessionStartBlock (r:1 w:0) - /// Proof Skipped: ParaScheduler SessionStartBlock (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ParathreadQueue (r:1 w:1) - 
/// Proof Skipped: ParaScheduler ParathreadQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler Scheduled (r:1 w:1) - /// Proof Skipped: ParaScheduler Scheduled (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParaScheduler ValidatorGroups (r:1 w:0) - /// Proof Skipped: ParaScheduler ValidatorGroups (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:1 w:0) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeHash (r:1 w:0) - /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeRestrictionSignal (r:1 w:0) - /// Proof Skipped: Paras UpgradeRestrictionSignal (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: ParasDisputes Included (r:0 w:1) - /// Proof Skipped: ParasDisputes Included (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpWatermarks (r:0 w:1) - /// Proof Skipped: Hrmp HrmpWatermarks (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: 
Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:0) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: 
None, `max_size`: None, mode: `Measured`) fn enter_backed_candidate_code_upgrade() -> Weight { // Proof Size summary in bytes: - // Measured: `42414` - // Estimated: `48354` - // Minimum execution time: 43_320_529_000 picoseconds. - Weight::from_parts(45_622_613_000, 0) - .saturating_add(Weight::from_parts(0, 48354)) - .saturating_add(T::DbWeight::get().reads(30)) - .saturating_add(T::DbWeight::get().writes(15)) + // Measured: `43282` + // Estimated: `49222` + // Minimum execution time: 42_128_606_000 picoseconds. + Weight::from_parts(42_822_806_000, 0) + .saturating_add(Weight::from_parts(0, 49222)) + .saturating_add(T::DbWeight::get().reads(31)) + .saturating_add(T::DbWeight::get().writes(16)) } } diff --git a/polkadot/runtime/westend/src/weights/xcm/mod.rs b/polkadot/runtime/westend/src/weights/xcm/mod.rs index 4a74db1fe56d..4d7b930bcfe9 100644 --- a/polkadot/runtime/westend/src/weights/xcm/mod.rs +++ b/polkadot/runtime/westend/src/weights/xcm/mod.rs @@ -144,7 +144,7 @@ impl XcmWeightInfo for WestendXcmWeight { fn descend_origin(_who: &InteriorLocation) -> Weight { XcmGeneric::::descend_origin() } - fn report_error(_query_repsonse_info: &QueryResponseInfo) -> Weight { + fn report_error(_query_response_info: &QueryResponseInfo) -> Weight { XcmGeneric::::report_error() } diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 7281007f0060..96d2a124ff9a 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -41,10 +41,11 @@ use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, ChildParachainConvertsVia, DescribeAllTerminal, DescribeFamily, FrameTransactionalProcessor, - FungibleAdapter, HashedDescription, IsConcrete, MintLocation, OriginToPluralityVoice, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, - XcmFeeManagerFromComponents, XcmFeeToAccount, + FungibleAdapter, HashedDescription, IsChildSystemParachain, IsConcrete, MintLocation, + OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -142,13 +143,12 @@ impl Contains for OnlyParachains { } } -pub struct CollectivesOrFellows; -impl Contains for CollectivesOrFellows { +pub struct Fellows; +impl Contains for Fellows { fn contains(location: &Location) -> bool { matches!( location.unpack(), - (0, [Parachain(COLLECTIVES_ID)]) | - (0, [Parachain(COLLECTIVES_ID), Plurality { id: BodyId::Technical, .. }]) + (0, [Parachain(COLLECTIVES_ID), Plurality { id: BodyId::Technical, .. }]) ) } } @@ -172,8 +172,8 @@ pub type Barrier = TrailingSetTopicAsId<( AllowTopLevelPaidExecutionFrom, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, - // Collectives and Fellows plurality get free execution. - AllowExplicitUnpaidExecutionFrom, + // Messages from system parachains or the Fellows plurality need not pay for execution. 
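// Illustrative sketch (not part of this diff): the tuple filter added on the
// next line grants unpaid execution when *either* member matches the origin.
// `Fellows` (defined above) and xcm-builder's `IsChildSystemParachain<ParaId>`
// both implement `frame_support::traits::Contains<Location>`, and a tuple of
// `Contains` impls ORs its members together, roughly:
fn may_skip_payment(origin: &Location) -> bool {
    use frame_support::traits::Contains;
    IsChildSystemParachain::<ParaId>::contains(origin) || Fellows::contains(origin)
}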
+ AllowExplicitUnpaidExecutionFrom<(IsChildSystemParachain, Fellows)>, ), UniversalLocation, ConstU32<8>, @@ -219,6 +219,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } parameter_types! { @@ -269,7 +272,9 @@ pub type LocalPalletOriginToLocation = ( impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; + // Note that this configuration of `SendXcmOrigin` is different from the one present in + // production. + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmRouter = XcmRouter; // Anyone can execute XCM messages locally... type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; diff --git a/polkadot/scripts/packaging/polkadot.service b/polkadot/scripts/packaging/polkadot.service index 7fb549c97f8b..8c5a483d4243 100644 --- a/polkadot/scripts/packaging/polkadot.service +++ b/polkadot/scripts/packaging/polkadot.service @@ -25,12 +25,13 @@ ProtectKernelTunables=true ProtectSystem=strict RemoveIPC=true RestrictAddressFamilies=AF_INET AF_INET6 AF_NETLINK AF_UNIX -RestrictNamespaces=true +RestrictNamespaces=false RestrictSUIDSGID=true SystemCallArchitectures=native SystemCallFilter=@system-service -SystemCallFilter=landlock_add_rule landlock_create_ruleset landlock_restrict_self seccomp -SystemCallFilter=~@clock @module @mount @reboot @swap @privileged +SystemCallFilter=landlock_add_rule landlock_create_ruleset landlock_restrict_self seccomp mount umount2 +SystemCallFilter=~@clock @module @reboot @swap @privileged +SystemCallFilter=pivot_root UMask=0027 [Install] diff --git a/polkadot/tests/common.rs b/polkadot/tests/common.rs index 15721c990e01..dbee2d365034 100644 --- a/polkadot/tests/common.rs +++ b/polkadot/tests/common.rs @@ -48,8 +48,8 @@ pub async fn wait_n_finalized_blocks(n: usize, url: &str) { /// Read the WS address from the output. /// -/// This is hack to get the actual binded sockaddr because -/// polkadot assigns a random port if the specified port was already binded. +/// This is a hack to get the actual bound sockaddr because +/// polkadot assigns a random port if the specified port was already bound.
/// /// You must call /// `Command::new("cmd").stdout(process::Stdio::piped()).stderr(process::Stdio::piped())` diff --git a/polkadot/tests/running_the_node_and_interrupt.rs b/polkadot/tests/running_the_node_and_interrupt.rs index 079c34e0421e..85c073d3023a 100644 --- a/polkadot/tests/running_the_node_and_interrupt.rs +++ b/polkadot/tests/running_the_node_and_interrupt.rs @@ -32,7 +32,7 @@ async fn running_the_node_works_and_can_be_interrupted() { }; async fn run_command_and_kill(signal: Signal) { - let tmpdir = tempdir().expect("coult not create temp dir"); + let tmpdir = tempdir().expect("could not create temp dir"); let mut cmd = Command::new(cargo_bin("polkadot")) .stdout(process::Stdio::piped()) diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index 273ffd247491..5b3f4f2d8d63 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -16,7 +16,7 @@ derivative = { version = "2.2.0", default-features = false, features = ["use_cor impl-trait-for-tuples = "0.2.2" log = { workspace = true } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } sp-weights = { path = "../../substrate/primitives/weights", default-features = false, features = ["serde"] } serde = { features = ["alloc", "derive", "rc"], workspace = true } schemars = { version = "0.8.13", default-features = true, optional = true } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index d3a8db4ea955..7d9d6f0ee0a1 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs index c75ecbceb083..c831cd024659 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs @@ -145,6 +145,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } impl crate::Config for Test { diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs index b037d3dd8b23..534f7d85ea2e 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs @@ -135,6 +135,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Aliasers; type TransactionalProcessor = FrameTransactionalProcessor; + type 
HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } parameter_types! { diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index cce3d500398b..f04e6202f4c5 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] bounded-collections = { version = "0.2.0", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } log = { workspace = true } @@ -25,6 +25,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } +xcm-fee-payment-runtime-api = { path = "../xcm-fee-payment-runtime-api", default-features = false } # marked optional, used in benchmarking frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -52,6 +53,7 @@ std = [ "sp-runtime/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] runtime-benchmarks = [ diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index 7b3518db35bd..d44b30b96db8 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -16,6 +16,7 @@ use super::*; use bounded_collections::{ConstU32, WeakBoundedVec}; +use codec::Encode; use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; use frame_support::{ traits::fungible::{Inspect, Mutate}, @@ -108,6 +109,21 @@ benchmarks! { let versioned_msg = VersionedXcm::from(msg); }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg)) + send_blob { + let send_origin = + T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + if T::SendXcmOrigin::try_origin(send_origin.clone()).is_err() { + return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) + } + let msg = Xcm::<()>(vec![ClearOrigin]); + let versioned_dest: VersionedLocation = T::reachable_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )? + .into(); + let versioned_msg = VersionedXcm::from(msg); + let encoded_versioned_msg = versioned_msg.encode().try_into().unwrap(); + }: _>(send_origin, Box::new(versioned_dest), encoded_versioned_msg) + teleport_assets { let (asset, destination) = T::teleportable_asset_and_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), @@ -227,6 +243,19 @@ benchmarks! 
{ let versioned_msg = VersionedXcm::from(msg); }: _>(execute_origin, Box::new(versioned_msg), Weight::MAX) + execute_blob { + let execute_origin = + T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let origin_location = T::ExecuteXcmOrigin::try_origin(execute_origin.clone()) + .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + let msg = Xcm(vec![ClearOrigin]); + if !T::XcmExecuteFilter::contains(&(origin_location, msg.clone())) { + return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) + } + let versioned_msg = VersionedXcm::from(msg); + let encoded_versioned_msg = versioned_msg.encode().try_into().unwrap(); + }: _>(execute_origin, encoded_versioned_msg, Weight::MAX) + force_xcm_version { let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index c17c7eaf4715..99403c703a9c 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -53,8 +53,8 @@ use sp_runtime::{ }; use xcm::{latest::QueryResponseInfo, prelude::*}; use xcm_builder::{ - ExecuteController, ExecuteControllerWeightInfo, QueryController, QueryControllerWeightInfo, - SendController, SendControllerWeightInfo, + ExecuteController, ExecuteControllerWeightInfo, MaxXcmEncodedSize, QueryController, + QueryControllerWeightInfo, SendController, SendControllerWeightInfo, }; use xcm_executor::{ traits::{ @@ -64,6 +64,7 @@ use xcm_executor::{ }, AssetsInHolding, }; +use xcm_fee_payment_runtime_api::Error as FeePaymentError; #[cfg(any(feature = "try-runtime", test))] use sp_runtime::TryRuntimeError; @@ -89,6 +90,8 @@ pub trait WeightInfo { fn new_query() -> Weight; fn take_response() -> Weight; fn claim_assets() -> Weight; + fn execute_blob() -> Weight; + fn send_blob() -> Weight; } /// fallback implementation @@ -173,6 +176,14 @@ impl WeightInfo for TestWeightInfo { fn claim_assets() -> Weight { Weight::from_parts(100_000_000, 0) } + + fn execute_blob() -> Weight { + Weight::from_parts(100_000_000, 0) + } + + fn send_blob() -> Weight { + Weight::from_parts(100_000_000, 0) + } } #[frame_support::pallet] @@ -288,76 +299,49 @@ pub mod pallet { } impl ExecuteControllerWeightInfo for Pallet { - fn execute() -> Weight { - T::WeightInfo::execute() + fn execute_blob() -> Weight { + T::WeightInfo::execute_blob() } } impl ExecuteController, ::RuntimeCall> for Pallet { type WeightInfo = Self; - fn execute( + fn execute_blob( origin: OriginFor, - message: Box::RuntimeCall>>, + encoded_message: BoundedVec, max_weight: Weight, ) -> Result { - log::trace!(target: "xcm::pallet_xcm::execute", "message {:?}, max_weight {:?}", message, max_weight); - let outcome = (|| { - let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; - let mut hash = message.using_encoded(sp_io::hashing::blake2_256); - let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; - let value = (origin_location, message); - ensure!(T::XcmExecuteFilter::contains(&value), Error::::Filtered); - let (origin_location, message) = value; - Ok(T::XcmExecutor::prepare_and_execute( - origin_location, - message, - &mut hash, - max_weight, - max_weight, - )) - })() - .map_err(|e: DispatchError| { - e.with_weight(::execute()) - })?; - - Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); - let weight_used = outcome.weight_used(); - outcome.ensure_complete().map_err(|error| { - 
log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error); - Error::::LocalExecutionIncomplete.with_weight( - weight_used.saturating_add( - ::execute(), - ), - ) - })?; - Ok(weight_used) + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let message = + VersionedXcm::<::RuntimeCall>::decode(&mut &encoded_message[..]) + .map_err(|error| { + log::error!(target: "xcm::execute_blob", "Unable to decode XCM, error: {:?}", error); + Error::::UnableToDecode + })?; + Self::execute_base(origin_location, Box::new(message), max_weight) } } impl SendControllerWeightInfo for Pallet { - fn send() -> Weight { - T::WeightInfo::send() + fn send_blob() -> Weight { + T::WeightInfo::send_blob() } } impl SendController> for Pallet { type WeightInfo = Self; - fn send( + fn send_blob( origin: OriginFor, dest: Box, - message: Box>, + encoded_message: BoundedVec, ) -> Result { let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; - let interior: Junctions = - origin_location.clone().try_into().map_err(|_| Error::::InvalidOrigin)?; - let dest = Location::try_from(*dest).map_err(|()| Error::::BadVersion)?; - let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; - - let message_id = Self::send_xcm(interior, dest.clone(), message.clone()) - .map_err(Error::::from)?; - let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; - Self::deposit_event(e); - Ok(message_id) + let message = + VersionedXcm::<()>::decode(&mut &encoded_message[..]).map_err(|error| { + log::error!(target: "xcm::send_blob", "Unable to decode XCM, error: {:?}", error); + Error::::UnableToDecode + })?; + Self::send_base(origin_location, dest, Box::new(message)) } } @@ -377,7 +361,7 @@ pub mod pallet { origin: OriginFor, timeout: BlockNumberFor, match_querier: VersionedLocation, - ) -> Result { + ) -> Result { let responder = ::ExecuteXcmOrigin::ensure_origin(origin)?; let query_id = ::new_query( responder, @@ -564,6 +548,11 @@ pub mod pallet { TooManyReserves, /// Local XCM execution incomplete. LocalExecutionIncomplete, + /// Could not decode XCM. + UnableToDecode, + /// XCM encoded length is too large. + /// Returned when an XCM encoded length is larger than `MaxXcmEncodedSize`. + XcmTooLarge, } impl From for Error { @@ -901,16 +890,72 @@ pub mod pallet { } } - #[pallet::call] impl Pallet { + /// Underlying logic for both [`execute_blob`] and [`execute`]. 
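// Caller-side sketch (illustrative; `execute_base` on the next line carries the
// logic shared with the deprecated `execute`/`send`): the new blob extrinsics
// expect a SCALE-encoded `VersionedXcm` bounded by `MaxXcmEncodedSize`, which
// the pallet decodes back exactly as shown above. `BoundedVec` and
// `MaxXcmEncodedSize` are assumed in scope as in this file.
fn blob_round_trip_sketch() {
    use codec::{Decode, Encode};
    let message: VersionedXcm<()> = VersionedXcm::from(Xcm(vec![ClearOrigin]));
    // Encode and bound the message; oversized messages fail here on the caller side.
    let blob: BoundedVec<u8, MaxXcmEncodedSize> =
        message.encode().try_into().expect("ClearOrigin encodes well under the bound");
    // The pallet decodes it back, mapping failures to `Error::<T>::UnableToDecode`.
    let decoded = VersionedXcm::<()>::decode(&mut &blob[..]).expect("just encoded");
    assert_eq!(message, decoded);
}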
+ fn execute_base( + origin_location: Location, + message: Box::RuntimeCall>>, + max_weight: Weight, + ) -> Result { + log::trace!(target: "xcm::pallet_xcm::execute", "message {:?}, max_weight {:?}", message, max_weight); + let outcome = (|| { + let mut hash = message.using_encoded(sp_io::hashing::blake2_256); + let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; + let value = (origin_location, message); + ensure!(T::XcmExecuteFilter::contains(&value), Error::::Filtered); + let (origin_location, message) = value; + Ok(T::XcmExecutor::prepare_and_execute( + origin_location, + message, + &mut hash, + max_weight, + max_weight, + )) + })() + .map_err(|e: DispatchError| e.with_weight(T::WeightInfo::execute()))?; + + Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); + let weight_used = outcome.weight_used(); + outcome.ensure_complete().map_err(|error| { + log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error); + Error::::LocalExecutionIncomplete + .with_weight(weight_used.saturating_add(T::WeightInfo::execute())) + })?; + Ok(weight_used) + } + + /// Underlying logic for both [`send_blob`] and [`send`]. + fn send_base( + origin_location: Location, + dest: Box, + message: Box>, + ) -> Result { + let interior: Junctions = + origin_location.clone().try_into().map_err(|_| Error::::InvalidOrigin)?; + let dest = Location::try_from(*dest).map_err(|()| Error::::BadVersion)?; + let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; + + let message_id = Self::send_xcm(interior, dest.clone(), message.clone()) + .map_err(Error::::from)?; + let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; + Self::deposit_event(e); + Ok(message_id) + } + } + + #[pallet::call(weight(::WeightInfo))] + impl Pallet { + /// WARNING: DEPRECATED. `send` will be removed after June 2024. Use `send_blob` instead. + #[allow(deprecated)] + #[deprecated(note = "`send` will be removed after June 2024. Use `send_blob` instead.")] #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::send())] pub fn send( origin: OriginFor, dest: Box, message: Box>, ) -> DispatchResult { - >::send(origin, dest, message)?; + let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; + Self::send_base(origin_location, dest, message)?; Ok(()) } @@ -933,23 +978,10 @@ pub mod pallet { /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. #[pallet::call_index(1)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use alloc::vec; - let count = assets.len() as u32; - let mut message = Xcm(vec![ - WithdrawAsset(assets), - SetFeesMode { jit_withdraw: true }, - InitiateTeleport { assets: Wild(AllCounted(count)), dest, xcm: Xcm(vec![]) }, - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::teleport_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] + #[allow(deprecated)] + #[deprecated( + note = "This extrinsic uses `WeightLimit::Unlimited`, please migrate to `limited_teleport_assets` or `transfer_assets`" + )] pub fn teleport_assets( origin: OriginFor, dest: Box, @@ -991,23 +1023,10 @@ pub mod pallet { /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. 
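// Note on the weight annotations removed below (illustrative): the
// `#[pallet::call(weight(<T as Config>::WeightInfo))]` attribute added earlier
// in this hunk makes any call without its own `#[pallet::weight]` annotation
// default to the `WeightInfo` method of the same name, e.g.
//
//     pub fn force_suspension(origin: OriginFor<T>, suspended: bool) -> DispatchResult
//
// is weighed as if annotated with `#[pallet::weight(T::WeightInfo::force_suspension())]`.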
#[pallet::call_index(2)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use alloc::vec; - // heaviest version of locally executed XCM program: equivalent in weight to - // transfer assets to SA, reanchor them, extend XCM program, and send onward XCM - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::reserve_transfer_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] + #[allow(deprecated)] + #[deprecated( + note = "This extrinsic uses `WeightLimit::Unlimited`, please migrate to `limited_reserve_transfer_assets` or `transfer_assets`" + )] pub fn reserve_transfer_assets( origin: OriginFor, dest: Box, @@ -1033,6 +1052,13 @@ pub mod pallet { /// No more than `max_weight` will be used in its attempted execution. If this is less than /// the maximum amount of weight that the message could take to be executed, then no /// execution attempt will be made. + /// + /// WARNING: DEPRECATED. `execute` will be removed after June 2024. Use `execute_blob` + /// instead. + #[allow(deprecated)] + #[deprecated( + note = "`execute` will be removed after June 2024. Use `execute_blob` instead." + )] #[pallet::call_index(3)] #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))] pub fn execute( @@ -1040,8 +1066,8 @@ pub mod pallet { message: Box::RuntimeCall>>, max_weight: Weight, ) -> DispatchResultWithPostInfo { - let weight_used = - >::execute(origin, message, max_weight)?; + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let weight_used = Self::execute_base(origin_location, message, max_weight)?; Ok(Some(weight_used.saturating_add(T::WeightInfo::execute())).into()) } @@ -1052,7 +1078,6 @@ pub mod pallet { /// - `location`: The destination that is being described. /// - `xcm_version`: The latest version of XCM that `location` supports. #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::force_xcm_version())] pub fn force_xcm_version( origin: OriginFor, location: Box, @@ -1071,7 +1096,6 @@ pub mod pallet { /// - `origin`: Must be an origin specified by AdminOrigin. /// - `maybe_xcm_version`: The default XCM encoding version, or `None` to disable. #[pallet::call_index(5)] - #[pallet::weight(T::WeightInfo::force_default_xcm_version())] pub fn force_default_xcm_version( origin: OriginFor, maybe_xcm_version: Option, @@ -1086,7 +1110,6 @@ pub mod pallet { /// - `origin`: Must be an origin specified by AdminOrigin. /// - `location`: The location to which we should subscribe for XCM version notifications. #[pallet::call_index(6)] - #[pallet::weight(T::WeightInfo::force_subscribe_version_notify())] pub fn force_subscribe_version_notify( origin: OriginFor, location: Box, @@ -1110,7 +1133,6 @@ pub mod pallet { /// - `location`: The location to which we are currently subscribed for XCM version /// notifications which we no longer desire. #[pallet::call_index(7)] - #[pallet::weight(T::WeightInfo::force_unsubscribe_version_notify())] pub fn force_unsubscribe_version_notify( origin: OriginFor, location: Box, @@ -1143,7 +1165,7 @@ pub mod pallet { /// /// Fee payment on the destination side is made from the asset in the `assets` vector of /// index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. 
If more weight - /// is needed than `weight_limit`, then the operation will fail and the assets send may be + /// is needed than `weight_limit`, then the operation will fail and the sent assets may be /// at risk. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. @@ -1158,23 +1180,7 @@ pub mod pallet { /// fees. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. #[pallet::call_index(8)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use alloc::vec; - // heaviest version of locally executed XCM program: equivalent in weight to - // transfer assets to SA, reanchor them, extend XCM program, and send onward XCM - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::reserve_transfer_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] + #[pallet::weight(T::WeightInfo::reserve_transfer_assets())] pub fn limited_reserve_transfer_assets( origin: OriginFor, dest: Box, @@ -1197,7 +1203,7 @@ pub mod pallet { /// /// Fee payment on the destination side is made from the asset in the `assets` vector of /// index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight - /// is needed than `weight_limit`, then the operation will fail and the assets send may be + /// is needed than `weight_limit`, then the operation will fail and the sent assets may be /// at risk. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. @@ -1212,23 +1218,7 @@ pub mod pallet { /// fees. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. #[pallet::call_index(9)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use alloc::vec; - let count = assets.len() as u32; - let mut message = Xcm(vec![ - WithdrawAsset(assets), - SetFeesMode { jit_withdraw: true }, - InitiateTeleport { assets: Wild(AllCounted(count)), dest, xcm: Xcm(vec![]) }, - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::teleport_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] + #[pallet::weight(T::WeightInfo::teleport_assets())] pub fn limited_teleport_assets( origin: OriginFor, dest: Box, @@ -1252,7 +1242,6 @@ pub mod pallet { /// - `origin`: Must be an origin specified by AdminOrigin. /// - `suspended`: `true` to suspend, `false` to resume. #[pallet::call_index(10)] - #[pallet::weight(T::WeightInfo::force_suspension())] pub fn force_suspension(origin: OriginFor, suspended: bool) -> DispatchResult { T::AdminOrigin::ensure_origin(origin)?; XcmExecutionSuspended::::set(suspended); @@ -1265,7 +1254,7 @@ pub mod pallet { /// Fee payment on the destination side is made from the asset in the `assets` vector of /// index `fee_asset_item` (hence referred to as `fees`), up to enough to pay for /// `weight_limit` of weight. If more weight is needed than `weight_limit`, then the - /// operation will fail and the assets sent may be at risk. + /// operation will fail and the sent assets may be at risk. 
/// /// `assets` (excluding `fees`) must have same reserve location or otherwise be teleportable /// to `dest`, no limitations imposed on `fees`. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. @@ -1293,26 +1282,6 @@ /// fees. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. #[pallet::call_index(11)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use alloc::vec; - // heaviest version of locally executed XCM program: equivalent in weight to withdrawing fees, - // burning them, transferring rest of assets to SA, reanchoring them, extending XCM program, - // and sending onward XCM - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - WithdrawAsset(assets.clone()), - BurnAsset(assets.clone()), - TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::transfer_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] pub fn transfer_assets( origin: OriginFor, dest: Box, @@ -1402,22 +1371,6 @@ /// was the latest when they were trapped. /// - `beneficiary`: The location/account where the claimed assets will be deposited. #[pallet::call_index(12)] - #[pallet::weight({ - let assets_version = assets.identify_version(); - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_beneficiary: Result = (*beneficiary.clone()).try_into(); - match (maybe_assets, maybe_beneficiary) { - (Ok(assets), Ok(beneficiary)) => { - let ticket: Location = GeneralIndex(assets_version as u128).into(); - let mut message = Xcm(vec![ - ClaimAsset { assets: assets.clone(), ticket }, - DepositAsset { assets: AllCounted(assets.len() as u32).into(), beneficiary }, - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::claim_assets().saturating_add(w)) - } - _ => Weight::MAX - } - })] pub fn claim_assets( origin: OriginFor, assets: Box, @@ -1452,6 +1405,47 @@ })?; Ok(()) } + + /// Execute an XCM from a local, signed, origin. + /// + /// An event is deposited indicating whether the message could be executed completely + /// or only partially. + /// + /// No more than `max_weight` will be used in its attempted execution. If this is less than + /// the maximum amount of weight that the message could take to be executed, then no + /// execution attempt will be made. + /// + /// The message is passed in encoded form. It needs to be decodable as a [`VersionedXcm`]. + #[pallet::call_index(13)] + #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute_blob()))] + pub fn execute_blob( + origin: OriginFor, + encoded_message: BoundedVec, + max_weight: Weight, + ) -> DispatchResultWithPostInfo { + let weight_used = >::execute_blob( + origin, + encoded_message, + max_weight, + )?; + Ok(Some(weight_used.saturating_add(T::WeightInfo::execute_blob())).into()) + } + + /// Send an XCM from a local, signed, origin. + /// + /// The destination, `dest`, will receive this message with a `DescendOrigin` instruction + /// that makes the origin of the message be the origin on this system. + /// + /// The message is passed in encoded form. It needs to be decodable as a [`VersionedXcm`].
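// Usage sketch for `send_blob`, declared next (mirrors the updated `send_works`
// test later in this diff; `XcmPallet`, `ALICE`, `INITIAL_BALANCE`,
// `RelayLocation` and `new_test_ext_with_balances` are the test-mock names):
#[test]
fn send_blob_usage_sketch() {
    use codec::Encode;
    new_test_ext_with_balances(vec![(ALICE, INITIAL_BALANCE)]).execute_with(|| {
        let message = VersionedXcm::<()>::from(Xcm(vec![ClearOrigin]));
        let blob: BoundedVec<u8, MaxXcmEncodedSize> =
            message.encode().try_into().expect("small message");
        assert_ok!(XcmPallet::send_blob(
            RuntimeOrigin::signed(ALICE),
            Box::new(RelayLocation::get().into()),
            blob,
        ));
    });
}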
+ #[pallet::call_index(14)] + pub fn send_blob( + origin: OriginFor, + dest: Box, + encoded_message: BoundedVec, + ) -> DispatchResult { + >::send_blob(origin, dest, encoded_message)?; + Ok(()) + } } } @@ -1481,7 +1475,6 @@ impl core::fmt::Debug for FeesHandling { } impl QueryHandler for Pallet { - type QueryId = u64; type BlockNumber = BlockNumberFor; type Error = XcmError; type UniversalLocation = T::UniversalLocation; @@ -1491,7 +1484,7 @@ impl QueryHandler for Pallet { responder: impl Into, timeout: BlockNumberFor, match_querier: impl Into, - ) -> Self::QueryId { + ) -> QueryId { Self::do_new_query(responder, None, timeout, match_querier) } @@ -1501,7 +1494,7 @@ impl QueryHandler for Pallet { message: &mut Xcm<()>, responder: impl Into, timeout: Self::BlockNumber, - ) -> Result { + ) -> Result { let responder = responder.into(); let destination = Self::UniversalLocation::get() .invert_target(&responder) @@ -1514,7 +1507,7 @@ impl QueryHandler for Pallet { } /// Removes response when ready and emits [Event::ResponseTaken] event. - fn take_response(query_id: Self::QueryId) -> QueryResponseStatus { + fn take_response(query_id: QueryId) -> QueryResponseStatus { match Queries::::get(query_id) { Some(QueryStatus::Ready { response, at }) => match response.try_into() { Ok(response) => { @@ -1531,7 +1524,7 @@ impl QueryHandler for Pallet { } #[cfg(feature = "runtime-benchmarks")] - fn expect_response(id: Self::QueryId, response: Response) { + fn expect_response(id: QueryId, response: Response) { let response = response.into(); Queries::::insert( id, @@ -1994,6 +1987,7 @@ impl Pallet { ]); Ok(Xcm(vec![ WithdrawAsset(assets.into()), + SetFeesMode { jit_withdraw: true }, InitiateReserveWithdraw { assets: Wild(AllCounted(max_assets)), reserve, @@ -2366,6 +2360,37 @@ impl Pallet { AccountIdConversion::::into_account_truncating(&ID) } + pub fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + let message = + Xcm::<()>::try_from(message).map_err(|_| FeePaymentError::VersionedConversionFailed)?; + + T::Weigher::weight(&mut message.into()).map_err(|()| { + log::error!(target: "xcm::pallet_xcm::query_xcm_weight", "Error when querying XCM weight"); + FeePaymentError::WeightNotComputable + }) + } + + pub fn query_delivery_fees( + destination: VersionedLocation, + message: VersionedXcm<()>, + ) -> Result { + let result_version = destination.identify_version().max(message.identify_version()); + + let destination = + destination.try_into().map_err(|_| FeePaymentError::VersionedConversionFailed)?; + + let message = message.try_into().map_err(|_| FeePaymentError::VersionedConversionFailed)?; + + let (_, fees) = validate_send::(destination, message).map_err(|error| { + log::error!(target: "xcm::pallet_xcm::query_delivery_fees", "Error when querying delivery fees: {:?}", error); + FeePaymentError::Unroutable + })?; + + VersionedAssets::from(fees) + .into_version(result_version) + .map_err(|_| FeePaymentError::VersionedConversionFailed) + } + /// Create a new expectation of a query response with the querier being here. 
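// Usage sketch for the new fee-payment helpers added above (hypothetical
// `Runtime` with this pallet configured; the error type is
// `xcm_fee_payment_runtime_api::Error`, imported here as `FeePaymentError`):
fn estimate_message_cost() -> Result<(), FeePaymentError> {
    let message = VersionedXcm::<()>::from(Xcm(vec![ClearOrigin]));
    // Weigh the program with the configured `Weigher`...
    let _weight = Pallet::<Runtime>::query_xcm_weight(message.clone())?;
    // ...and validate-send against the router to price delivery to the parent.
    let _fees = Pallet::<Runtime>::query_delivery_fees(
        VersionedLocation::from(Location::parent()),
        message,
    )?;
    Ok(())
}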
fn do_new_query( responder: impl Into, diff --git a/polkadot/xcm/pallet-xcm/src/migration.rs b/polkadot/xcm/pallet-xcm/src/migration.rs index 3de68913d818..0aec97ab4105 100644 --- a/polkadot/xcm/pallet-xcm/src/migration.rs +++ b/polkadot/xcm/pallet-xcm/src/migration.rs @@ -19,7 +19,7 @@ use crate::{ }; use frame_support::{ pallet_prelude::*, - traits::{OnRuntimeUpgrade, StorageVersion}, + traits::{OnRuntimeUpgrade, StorageVersion, UncheckedOnRuntimeUpgrade}, weights::Weight, }; @@ -35,7 +35,7 @@ pub mod v1 { /// /// Use experimental [`MigrateToV1`] instead. pub struct VersionUncheckedMigrateToV1(core::marker::PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { let mut weight = T::DbWeight::get().reads(1); diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index 35660d0a169c..40e7a453d62b 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -361,6 +361,10 @@ parameter_types! { 0, [Parachain(FOREIGN_ASSET_RESERVE_PARA_ID)] ); + pub PaidParaForeignReserveLocation: Location = Location::new( + 0, + [Parachain(Para3000::get())] + ); pub ForeignAsset: Asset = Asset { fun: Fungible(10), id: AssetId(Location::new( @@ -368,6 +372,13 @@ parameter_types! { [Parachain(FOREIGN_ASSET_RESERVE_PARA_ID), FOREIGN_ASSET_INNER_JUNCTION], )), }; + pub PaidParaForeignAsset: Asset = Asset { + fun: Fungible(10), + id: AssetId(Location::new( + 0, + [Parachain(Para3000::get())], + )), + }; pub UsdcReserveLocation: Location = Location::new( 0, [Parachain(USDC_RESERVE_PARA_ID)] @@ -450,6 +461,8 @@ parameter_types! { pub TrustedFilteredTeleport: (AssetFilter, Location) = (FilteredTeleportAsset::get().into(), FilteredTeleportLocation::get()); pub TeleportUsdtToForeign: (AssetFilter, Location) = (Usdt::get().into(), ForeignReserveLocation::get()); pub TrustedForeign: (AssetFilter, Location) = (ForeignAsset::get().into(), ForeignReserveLocation::get()); + pub TrustedPaidParaForeign: (AssetFilter, Location) = (PaidParaForeignAsset::get().into(), PaidParaForeignReserveLocation::get()); + pub TrustedUsdc: (AssetFilter, Location) = (Usdc::get().into(), UsdcReserveLocation::get()); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; @@ -483,7 +496,7 @@ impl xcm_executor::Config for XcmConfig { type XcmSender = XcmRouter; type AssetTransactor = AssetTransactors; type OriginConverter = LocalOriginConverter; - type IsReserve = (Case, Case); + type IsReserve = (Case, Case, Case); type IsTeleporter = ( Case, Case, @@ -513,6 +526,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } pub type LocalOriginToLocation = SignedToAccountId32; diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs index 27be5cce1458..7dc05c1cc70e 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -31,13 +31,17 @@ use sp_runtime::{traits::AccountIdConversion, DispatchError, ModuleError}; use xcm::prelude::*; use xcm_executor::traits::ConvertLocation; -// Helper function to deduplicate testing different teleport types. 
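// Sketch of the intended pairing for the `UncheckedOnRuntimeUpgrade` switch in
// the migration.rs hunk above (per frame-support conventions; not verbatim from
// this diff): the unchecked migration is wrapped in `VersionedMigration`, which
// supplies the `StorageVersion` pre/post handling the unchecked impl omits:
pub type MigrateToV1<T> = frame_support::migrations::VersionedMigration<
    0, // expected on-chain storage version before this migration runs
    1, // storage version to set afterwards
    v1::VersionUncheckedMigrateToV1<T>,
    crate::Pallet<T>,
    <T as frame_system::Config>::DbWeight,
>;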
-fn do_test_and_verify_teleport_assets( - origin_location: Location, - expected_beneficiary: Location, - call: Call, - expected_weight_limit: WeightLimit, -) { +/// Test `limited_teleport_assets` +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. +#[test] +fn limited_teleport_assets_works() { + let origin_location: Location = AccountId32 { network: None, id: ALICE.into() }.into(); + let expected_beneficiary: Location = AccountId32 { network: None, id: BOB.into() }.into(); + let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); + let expected_weight_limit = weight_limit.clone(); + let balances = vec![ (ALICE, INITIAL_BALANCE), (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), @@ -47,7 +51,14 @@ fn do_test_and_verify_teleport_assets( let weight = BaseXcmWeight::get() * 2; assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); // call extrinsic - call(); + assert_ok!(XcmPallet::limited_teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(expected_beneficiary.clone().into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + weight_limit, + )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); assert_eq!( sent_xcm(), @@ -88,57 +99,6 @@ fn do_test_and_verify_teleport_assets( }); } -/// Test `teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. -#[test] -fn teleport_assets_works() { - let origin_location: Location = AccountId32 { network: None, id: ALICE.into() }.into(); - let beneficiary: Location = AccountId32 { network: None, id: BOB.into() }.into(); - do_test_and_verify_teleport_assets( - origin_location.clone(), - beneficiary.clone(), - || { - assert_ok!(XcmPallet::teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(beneficiary.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - }, - Unlimited, - ); -} - -/// Test `limited_teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. 
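// Migration note (illustrative): with `teleport_assets` and
// `reserve_transfer_assets` deprecated earlier in this diff, call sites move to
// the `limited_*` variants with an explicit `Unlimited` weight limit, which
// preserves the old behaviour; the paid-router test below makes exactly this
// substitution:
//
//     XcmPallet::reserve_transfer_assets(origin, dest, beneficiary, assets, 0)
//     // becomes
//     XcmPallet::limited_reserve_transfer_assets(origin, dest, beneficiary, assets, 0, Unlimited)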
-#[test] -fn limited_teleport_assets_works() { - let origin_location: Location = AccountId32 { network: None, id: ALICE.into() }.into(); - let beneficiary: Location = AccountId32 { network: None, id: BOB.into() }.into(); - let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); - let expected_weight_limit = weight_limit.clone(); - do_test_and_verify_teleport_assets( - origin_location.clone(), - beneficiary.clone(), - || { - assert_ok!(XcmPallet::limited_teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(beneficiary.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - weight_limit, - )); - }, - expected_weight_limit, - ); -} - /// `limited_teleport_assets` should fail for filtered assets #[test] fn limited_teleport_filtered_assets_disallowed() { @@ -184,12 +144,13 @@ fn reserve_transfer_assets_with_paid_router_works() { let dest: Location = Junction::AccountId32 { network: None, id: user_account.clone().into() }.into(); assert_eq!(Balances::total_balance(&user_account), INITIAL_BALANCE); - assert_ok!(XcmPallet::reserve_transfer_assets( + assert_ok!(XcmPallet::limited_reserve_transfer_assets( RuntimeOrigin::signed(user_account.clone()), Box::new(Parachain(paid_para_id).into()), Box::new(dest.clone().into()), Box::new((Here, SEND_AMOUNT).into()), 0, + Unlimited, )); // XCM_FEES_NOT_WAIVED_USER_ACCOUNT spent amount @@ -248,7 +209,7 @@ fn reserve_transfer_assets_with_paid_router_works() { pub(crate) fn set_up_foreign_asset( reserve_para_id: u32, inner_junction: Option, - benficiary: AccountId, + beneficiary: AccountId, initial_amount: u128, is_sufficient: bool, ) -> (Location, AccountId, Location) { @@ -276,7 +237,7 @@ pub(crate) fn set_up_foreign_asset( assert_ok!(AssetsPallet::mint( RuntimeOrigin::signed(BOB), foreign_asset_id_location.clone(), - beneficiary, + beneficiary, initial_amount )); @@ -2604,3 +2565,174 @@ fn teleport_assets_using_destination_reserve_fee_disallowed() { expected_result, ); } + +/// Test `tested_call` transferring a single asset using remote reserve. +/// +/// Transferring Para3000 asset (`Para3000` reserve) to +/// `OTHER_PARA_ID` (no teleport trust), therefore triggering remote reserve. +/// Using the same asset (Para3000 reserve) for fees. +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. +/// +/// Verifies that XCM router fees (`SendXcm::validate` -> `Assets`) are withdrawn from the correct +/// user account and deposited to the correct target account (`XcmFeesTargetAccount`). +/// Verifies `expected_result`.
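// Flow exercised by the helper below (illustrative): the transferred asset's
// reserve is Para3000 while the destination is OTHER_PARA_ID (no teleport
// trust), so the transfer is routed through the remote reserve:
//
//   origin (test chain) --burn--> reserve (Para3000): WithdrawAsset + DepositReserveAsset
//   reserve (Para3000) --onward XCM--> dest (OTHER_PARA_ID): buy_execution + DepositAsset
//
// Fees use the same Para3000-reserve asset, so one onward message covers both.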
+/// Test `tested_call` transferring a single asset using a remote reserve.
+///
+/// Transferring Para3000 asset (`Para3000` reserve) to
+/// `OTHER_PARA_ID` (no teleport trust), therefore triggering the remote reserve.
+/// Using the same asset (Para3000 reserve) for fees.
+///
+/// Asserts that the sender's balance is decreased and the beneficiary's balance
+/// is increased. Verifies the correct message is sent and event is emitted.
+///
+/// Verifies that XCM router fees (`SendXcm::validate` -> `Assets`) are withdrawn from the correct
+/// user account and deposited to the correct target account (`XcmFeesTargetAccount`).
+/// Verifies `expected_result`.
+fn remote_asset_reserve_and_remote_fee_reserve_paid_call<Call>(
+    tested_call: Call,
+    expected_result: DispatchResult,
+) where
+    Call: FnOnce(
+        OriginFor<Test>,
+        Box<VersionedLocation>,
+        Box<VersionedLocation>,
+        Box<VersionedAssets>,
+        u32,
+        WeightLimit,
+    ) -> DispatchResult,
+{
+    let weight = BaseXcmWeight::get() * 3;
+    let user_account = AccountId::from(XCM_FEES_NOT_WAIVED_USER_ACCOUNT);
+    let xcm_router_fee_amount = Para3000PaymentAmount::get();
+    let paid_para_id = Para3000::get();
+    let balances = vec![
+        (user_account.clone(), INITIAL_BALANCE),
+        (ParaId::from(paid_para_id).into_account_truncating(), INITIAL_BALANCE),
+        (XcmFeesTargetAccount::get(), INITIAL_BALANCE),
+    ];
+    let beneficiary: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into();
+    new_test_ext_with_balances(balances).execute_with(|| {
+        // create sufficient foreign asset BLA
+        let foreign_initial_amount = 142;
+        let (reserve_location, _, foreign_asset_id_location) = set_up_foreign_asset(
+            paid_para_id,
+            None,
+            user_account.clone(),
+            foreign_initial_amount,
+            true,
+        );
+
+        // transfer destination is another chain that is not the reserve location;
+        // the goal is to trigger the remote-reserve case
+        let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap();
+
+        let transferred_asset: Assets = (foreign_asset_id_location.clone(), SEND_AMOUNT).into();
+
+        // balance checks before
+        assert_eq!(
+            AssetsPallet::balance(foreign_asset_id_location.clone(), user_account.clone()),
+            foreign_initial_amount
+        );
+        assert_eq!(Balances::free_balance(user_account.clone()), INITIAL_BALANCE);
+
+        // do the transfer
+        let result = tested_call(
+            RuntimeOrigin::signed(user_account.clone()),
+            Box::new(dest.clone().into()),
+            Box::new(beneficiary.clone().into()),
+            Box::new(transferred_asset.into()),
+            0 as u32,
+            Unlimited,
+        );
+        assert_eq!(result, expected_result);
+        if expected_result.is_err() {
+            // short-circuit here for tests where we expect failure
+            return;
+        }
+
+        let mut last_events = last_events(7).into_iter();
+        // asset events
+        // forceCreate
+        last_events.next().unwrap();
+        // mint tokens
+        last_events.next().unwrap();
+        // burn tokens
+        last_events.next().unwrap();
+        // balance events
+        // burn delivery fee
+        last_events.next().unwrap();
+        // mint delivery fee
+        last_events.next().unwrap();
+        assert_eq!(
+            last_events.next().unwrap(),
+            RuntimeEvent::XcmPallet(crate::Event::Attempted {
+                outcome: Outcome::Complete { used: weight }
+            })
+        );
+
+        // user account spent (transferred) amount
+        assert_eq!(
+            AssetsPallet::balance(foreign_asset_id_location.clone(), user_account.clone()),
+            foreign_initial_amount - SEND_AMOUNT
+        );
+
+        // user account spent delivery fees
+        assert_eq!(Balances::free_balance(user_account), INITIAL_BALANCE - xcm_router_fee_amount);
+
+        // XcmFeesTargetAccount should have received the xcm_router_fee_amount
+        assert_eq!(
+            Balances::free_balance(XcmFeesTargetAccount::get()),
+            INITIAL_BALANCE + xcm_router_fee_amount
+        );
+
+        // Verify total and active issuance of foreign BLA have decreased (burned on
+        // reserve-withdraw)
+        let expected_issuance = foreign_initial_amount - SEND_AMOUNT;
+        assert_eq!(
+            AssetsPallet::total_issuance(foreign_asset_id_location.clone()),
+            expected_issuance
+        );
+        assert_eq!(
+            AssetsPallet::active_issuance(foreign_asset_id_location.clone()),
+            expected_issuance
+        );
+
+        let context = UniversalLocation::get();
+        let foreign_id_location_reanchored =
+            foreign_asset_id_location.reanchored(&dest, &context).unwrap();
+        let dest_reanchored = dest.reanchored(&reserve_location,
&context).unwrap(); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + reserve_location, + // `assets` are burned on source and withdrawn from SA in remote reserve chain + Xcm(vec![ + WithdrawAsset((Location::here(), SEND_AMOUNT).into()), + ClearOrigin, + buy_execution((Location::here(), SEND_AMOUNT / 2)), + DepositReserveAsset { + assets: Wild(AllCounted(1)), + // final destination is `dest` as seen by `reserve` + dest: dest_reanchored, + // message sent onward to `dest` + xcm: Xcm(vec![ + buy_execution((foreign_id_location_reanchored, SEND_AMOUNT / 2)), + DepositAsset { assets: AllCounted(1).into(), beneficiary } + ]) + } + ]) + )] + ); + }); +} +/// Test `transfer_assets` with remote asset reserve and remote fee reserve. +#[test] +fn transfer_assets_with_remote_asset_reserve_and_remote_asset_fee_reserve_paid_works() { + let expected_result = Ok(()); + remote_asset_reserve_and_remote_fee_reserve_paid_call( + XcmPallet::transfer_assets, + expected_result, + ); +} +/// Test `limited_reserve_transfer_assets` with remote asset reserve and remote fee reserve. +#[test] +fn limited_reserve_transfer_assets_with_remote_asset_reserve_and_remote_asset_fee_reserve_paid_works( +) { + let expected_result = Ok(()); + remote_asset_reserve_and_remote_fee_reserve_paid_call( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index 13022d9a8b1f..763d768e154a 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -20,10 +20,10 @@ pub(crate) mod assets_transfer; use crate::{ mock::*, pallet::SupportedVersion, AssetTraps, Config, CurrentMigration, Error, - ExecuteControllerWeightInfo, LatestVersionedLocation, Pallet, Queries, QueryStatus, - VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, VersionNotifyTargets, - WeightInfo, + LatestVersionedLocation, Pallet, Queries, QueryStatus, VersionDiscoveryQueue, + VersionMigrationStage, VersionNotifiers, VersionNotifyTargets, WeightInfo, }; +use codec::Encode; use frame_support::{ assert_err_ignore_postinfo, assert_noop, assert_ok, traits::{Currency, Hooks}, @@ -305,11 +305,12 @@ fn send_works() { ]); let versioned_dest = Box::new(RelayLocation::get().into()); - let versioned_message = Box::new(VersionedXcm::from(message.clone())); - assert_ok!(XcmPallet::send( + let versioned_message = VersionedXcm::from(message.clone()); + let encoded_versioned_message = versioned_message.encode().try_into().unwrap(); + assert_ok!(XcmPallet::send_blob( RuntimeOrigin::signed(ALICE), versioned_dest, - versioned_message + encoded_versioned_message )); let sent_message = Xcm(Some(DescendOrigin(sender.clone().try_into().unwrap())) .into_iter() @@ -341,16 +342,16 @@ fn send_fails_when_xcm_router_blocks() { ]; new_test_ext_with_balances(balances).execute_with(|| { let sender: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - let message = Xcm(vec![ + let message = Xcm::<()>(vec![ ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), buy_execution((Parent, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: sender }, ]); assert_noop!( - XcmPallet::send( + XcmPallet::send_blob( RuntimeOrigin::signed(ALICE), Box::new(Location::ancestor(8).into()), - Box::new(VersionedXcm::from(message.clone())), + VersionedXcm::from(message.clone()).encode().try_into().unwrap(), ), crate::Error::::SendFailure ); @@ -371,13 +372,16 @@ fn 
execute_withdraw_to_deposit_works() { let weight = BaseXcmWeight::get() * 3; let dest: Location = Junction::AccountId32 { network: None, id: BOB.into() }.into(); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::execute( + assert_ok!(XcmPallet::execute_blob( RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ + VersionedXcm::from(Xcm::(vec![ WithdrawAsset((Here, SEND_AMOUNT).into()), buy_execution((Here, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]))), + ])) + .encode() + .try_into() + .unwrap(), weight )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); @@ -399,18 +403,21 @@ fn trapped_assets_can_be_claimed() { let weight = BaseXcmWeight::get() * 6; let dest: Location = Junction::AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::execute( + assert_ok!(XcmPallet::execute_blob( RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ + VersionedXcm::from(Xcm(vec![ WithdrawAsset((Here, SEND_AMOUNT).into()), buy_execution((Here, SEND_AMOUNT)), // Don't propagated the error into the result. - SetErrorHandler(Xcm(vec![ClearError])), + SetErrorHandler(Xcm::(vec![ClearError])), // This will make an error. Trap(0), // This would succeed, but we never get to it. DepositAsset { assets: AllCounted(1).into(), beneficiary: dest.clone() }, - ]))), + ])) + .encode() + .try_into() + .unwrap(), weight )); let source: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); @@ -437,13 +444,16 @@ fn trapped_assets_can_be_claimed() { assert_eq!(trapped, expected); let weight = BaseXcmWeight::get() * 3; - assert_ok!(XcmPallet::execute( + assert_ok!(XcmPallet::execute_blob( RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ + VersionedXcm::from(Xcm::(vec![ ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() }, buy_execution((Here, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: dest.clone() }, - ]))), + ])) + .encode() + .try_into() + .unwrap(), weight )); @@ -453,13 +463,16 @@ fn trapped_assets_can_be_claimed() { // Can't claim twice. assert_err_ignore_postinfo!( - XcmPallet::execute( + XcmPallet::execute_blob( RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ + VersionedXcm::from(Xcm::(vec![ ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() }, buy_execution((Here, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]))), + ])) + .encode() + .try_into() + .unwrap(), weight ), Error::::LocalExecutionIncomplete @@ -473,12 +486,13 @@ fn claim_assets_works() { let balances = vec![(ALICE, INITIAL_BALANCE)]; new_test_ext_with_balances(balances).execute_with(|| { // First trap some assets. - let trapping_program = - Xcm::builder_unsafe().withdraw_asset((Here, SEND_AMOUNT).into()).build(); + let trapping_program = Xcm::::builder_unsafe() + .withdraw_asset((Here, SEND_AMOUNT).into()) + .build(); // Even though assets are trapped, the extrinsic returns success. 
-        assert_ok!(XcmPallet::execute(
+        assert_ok!(XcmPallet::execute_blob(
             RuntimeOrigin::signed(ALICE),
-            Box::new(VersionedXcm::V4(trapping_program)),
+            VersionedXcm::V4(trapping_program).encode().try_into().unwrap(),
             BaseXcmWeight::get() * 2,
         ));
         assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT);
@@ -531,9 +545,9 @@ fn incomplete_execute_reverts_side_effects() {
         assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE);
         let amount_to_send = INITIAL_BALANCE - ExistentialDeposit::get();
         let assets: Assets = (Here, amount_to_send).into();
-        let result = XcmPallet::execute(
+        let result = XcmPallet::execute_blob(
             RuntimeOrigin::signed(ALICE),
-            Box::new(VersionedXcm::from(Xcm(vec![
+            VersionedXcm::from(Xcm::<RuntimeCall>(vec![
                 // Withdraw + BuyExec + Deposit should work
                 WithdrawAsset(assets.clone()),
                 buy_execution(assets.inner()[0].clone()),
@@ -541,7 +555,10 @@ fn incomplete_execute_reverts_side_effects() {
                 // Withdrawing once more will fail because of InsufficientBalance, and we expect to
                 // revert the effects of the above instructions as well
                 WithdrawAsset(assets),
-            ]))),
+            ]))
+            .encode()
+            .try_into()
+            .unwrap(),
             weight,
         );
         // all effects are reverted and balances unchanged for either sender or receiver
@@ -553,7 +570,7 @@ fn incomplete_execute_reverts_side_effects() {
             Err(sp_runtime::DispatchErrorWithPostInfo {
                 post_info: frame_support::dispatch::PostDispatchInfo {
                     actual_weight: Some(
-                        <Pallet<Test> as ExecuteControllerWeightInfo>::execute() + weight
+                        <<Test as Config>::WeightInfo>::execute_blob() + weight
                     ),
                     pays_fee: frame_support::dispatch::Pays::Yes,
                 },
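Note: every migrated call site above repeats the same `VersionedXcm` -> `encode()` -> `try_into()` sequence to build the bounded blob that `execute_blob`/`send_blob` accept. A test-local helper could fold that boilerplate; a hedged sketch (the name `to_blob` is hypothetical, and it assumes the `MaxXcmEncodedSize` parameter type exported from `xcm-builder` later in this diff):

```rust
use codec::Encode;
use frame_support::BoundedVec;
use xcm::{v4::Xcm, VersionedXcm};
use xcm_builder::MaxXcmEncodedSize;

/// Hypothetical test helper (not part of this PR): SCALE-encode a program and
/// bound it by `MaxXcmEncodedSize` (i.e. `MAX_XCM_ENCODED_SIZE`), exactly the
/// inline `encode().try_into().unwrap()` dance the migrated tests repeat.
/// Panics if the program exceeds the bound.
fn to_blob(xcm: Xcm<()>) -> BoundedVec<u8, MaxXcmEncodedSize> {
    VersionedXcm::from(xcm)
        .encode()
        .try_into()
        .expect("test program stays within MAX_XCM_ENCODED_SIZE")
}
```

With such a helper, a call site shrinks to `XcmPallet::execute_blob(origin, to_blob(message), weight)`.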
diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs
index ba8d726aecff..e836486e86e3 100644
--- a/polkadot/xcm/src/lib.rs
+++ b/polkadot/xcm/src/lib.rs
@@ -48,6 +48,9 @@ mod tests;
 /// Maximum nesting level for XCM decoding.
 pub const MAX_XCM_DECODE_DEPTH: u32 = 8;
+/// Maximum encoded size.
+/// See `decoding_respects_limit` test for more reasoning behind this value.
+pub const MAX_XCM_ENCODED_SIZE: u32 = 12402;
 
 /// A version of XCM.
 pub type Version = u32;
@@ -443,6 +446,16 @@ impl<RuntimeCall> IntoVersion for VersionedXcm<RuntimeCall> {
     }
 }
 
+impl<RuntimeCall> IdentifyVersion for VersionedXcm<RuntimeCall> {
+    fn identify_version(&self) -> Version {
+        match self {
+            Self::V2(_) => v2::VERSION,
+            Self::V3(_) => v3::VERSION,
+            Self::V4(_) => v4::VERSION,
+        }
+    }
+}
+
 impl<RuntimeCall> From<v2::Xcm<RuntimeCall>> for VersionedXcm<RuntimeCall> {
     fn from(x: v2::Xcm<RuntimeCall>) -> Self {
         VersionedXcm::V2(x)
diff --git a/polkadot/xcm/src/v2/multilocation.rs b/polkadot/xcm/src/v2/multilocation.rs
index 60aa1f6ceadf..ac98da8d08c9 100644
--- a/polkadot/xcm/src/v2/multilocation.rs
+++ b/polkadot/xcm/src/v2/multilocation.rs
@@ -176,7 +176,7 @@ impl MultiLocation {
     }
 
     /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with
-    /// theoriginal value of `self` in case of overflow.
+    /// the original value of `self` in case of overflow.
     pub fn pushed_with_interior(self, new: Junction) -> result::Result<MultiLocation, MultiLocation> {
         match self.interior.pushed_with(new) {
             Ok(i) => Ok(MultiLocation { interior: i, parents: self.parents }),
diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs
index 2aa02a4e7329..8f7d383440bb 100644
--- a/polkadot/xcm/src/v3/multilocation.rs
+++ b/polkadot/xcm/src/v3/multilocation.rs
@@ -202,7 +202,7 @@ impl MultiLocation {
     }
 
     /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with
-    /// theoriginal value of `self` in case of overflow.
+    /// the original value of `self` in case of overflow.
     pub fn pushed_with_interior(
         self,
         new: impl Into<Junction>,
diff --git a/polkadot/xcm/src/v4/location.rs b/polkadot/xcm/src/v4/location.rs
index 4244be90e81f..a04502fbd5ce 100644
--- a/polkadot/xcm/src/v4/location.rs
+++ b/polkadot/xcm/src/v4/location.rs
@@ -207,7 +207,7 @@ impl Location {
     }
 
     /// Consumes `self` and returns a `Location` suffixed with `new`, or an `Err` with
-    /// theoriginal value of `self` in case of overflow.
+    /// the original value of `self` in case of overflow.
     pub fn pushed_with_interior(
         self,
         new: impl Into<Junction>,
diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs
index 5cabe3cf69a1..1ada4f0c3b4b 100644
--- a/polkadot/xcm/src/v4/mod.rs
+++ b/polkadot/xcm/src/v4/mod.rs
@@ -1485,7 +1485,21 @@ mod tests {
         let encoded = big_xcm.encode();
         assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err());
 
-        let nested_xcm = Xcm::<()>(vec![
+        let mut many_assets = Assets::new();
+        for index in 0..MAX_ITEMS_IN_ASSETS {
+            many_assets.push((GeneralIndex(index as u128), 1u128).into());
+        }
+
+        let full_xcm_pass =
+            Xcm::<()>(vec![
+                TransferAsset { assets: many_assets, beneficiary: Here.into() };
+                MAX_INSTRUCTIONS_TO_DECODE as usize
+            ]);
+        let encoded = full_xcm_pass.encode();
+        assert_eq!(encoded.len(), 12402);
+        assert!(Xcm::<()>::decode(&mut &encoded[..]).is_ok());
+
+        let nested_xcm_fail = Xcm::<()>(vec![
             DepositReserveAsset {
                 assets: All.into(),
                 dest: Here.into(),
@@ -1493,10 +1507,10 @@
             };
             (MAX_INSTRUCTIONS_TO_DECODE / 2) as usize
         ]);
-        let encoded = nested_xcm.encode();
+        let encoded = nested_xcm_fail.encode();
         assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err());
 
-        let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm); 64]);
+        let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm_fail); 64]);
         let encoded = even_more_nested_xcm.encode();
         assert_eq!(encoded.len(), 342530);
         // This should not decode since the limit is 100
diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml
index bd1e8022b1f6..0fa206d84cbf 100644
--- a/polkadot/xcm/xcm-builder/Cargo.toml
+++ b/polkadot/xcm/xcm-builder/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
 [dependencies]
 impl-trait-for-tuples = "0.2.1"
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 xcm = { package = "staging-xcm", path = "..", default-features = false }
 xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false }
 sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false }
diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs
index db8ce00f10f9..7bcca0f523ae 100644
--- a/polkadot/xcm/xcm-builder/src/barriers.rs
+++ b/polkadot/xcm/xcm-builder/src/barriers.rs
@@ -142,7 +142,7 @@ impl<T: Contains<Location>> ShouldExecute for AllowTopLevelPaidExecutionFrom<T>
 /// In the above example, `AllowUnpaidExecutionFrom` appears once underneath
 /// `WithComputedOrigin`. This is in order to distinguish between messages which are notionally
 /// from a derivative location of `ParentLocation` but that just happened to be sent via
-/// `ParentLocaction` rather than messages that were sent by the parent.
+/// `ParentLocation` rather than messages that were sent by the parent.
/// /// Similarly `AllowTopLevelPaidExecutionFrom` appears twice: once inside of `WithComputedOrigin` /// where we provide the list of origins which are derivative origins, and then secondly outside diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs index e521e6cd941e..82af0b723ae6 100644 --- a/polkadot/xcm/xcm-builder/src/controller.rs +++ b/polkadot/xcm/xcm-builder/src/controller.rs @@ -23,6 +23,7 @@ use alloc::boxed::Box; use frame_support::{ dispatch::{DispatchErrorWithPostInfo, WithPostDispatchInfo}, pallet_prelude::DispatchError, + parameter_types, BoundedVec, }; use xcm::prelude::*; pub use xcm_executor::traits::QueryHandler; @@ -42,8 +43,12 @@ impl Controller f /// Weight functions needed for [`ExecuteController`]. pub trait ExecuteControllerWeightInfo { - /// Weight for [`ExecuteController::execute`] - fn execute() -> Weight; + /// Weight for [`ExecuteController::execute_blob`] + fn execute_blob() -> Weight; +} + +parameter_types! { + pub const MaxXcmEncodedSize: u32 = xcm::MAX_XCM_ENCODED_SIZE; } /// Execute an XCM locally, for a given origin. @@ -62,19 +67,19 @@ pub trait ExecuteController { /// # Parameters /// /// - `origin`: the origin of the call. - /// - `message`: the XCM program to be executed. + /// - `msg`: the encoded XCM to be executed, should be decodable as a [`VersionedXcm`] /// - `max_weight`: the maximum weight that can be consumed by the execution. - fn execute( + fn execute_blob( origin: Origin, - message: Box>, + message: BoundedVec, max_weight: Weight, ) -> Result; } /// Weight functions needed for [`SendController`]. pub trait SendControllerWeightInfo { - /// Weight for [`SendController::send`] - fn send() -> Weight; + /// Weight for [`SendController::send_blob`] + fn send_blob() -> Weight; } /// Send an XCM from a given origin. @@ -94,11 +99,11 @@ pub trait SendController { /// /// - `origin`: the origin of the call. /// - `dest`: the destination of the message. - /// - `msg`: the XCM to be sent. 
- fn send( + /// - `msg`: the encoded XCM to be sent, should be decodable as a [`VersionedXcm`] + fn send_blob( origin: Origin, dest: Box, - message: Box>, + message: BoundedVec, ) -> Result; } @@ -133,40 +138,40 @@ pub trait QueryController: QueryHandler { origin: Origin, timeout: Timeout, match_querier: VersionedLocation, - ) -> Result; + ) -> Result; } impl ExecuteController for () { type WeightInfo = (); - fn execute( + fn execute_blob( _origin: Origin, - _message: Box>, + _message: BoundedVec, _max_weight: Weight, ) -> Result { - Err(DispatchError::Other("ExecuteController::execute not implemented") + Err(DispatchError::Other("ExecuteController::execute_blob not implemented") .with_weight(Weight::zero())) } } impl ExecuteControllerWeightInfo for () { - fn execute() -> Weight { + fn execute_blob() -> Weight { Weight::zero() } } impl SendController for () { type WeightInfo = (); - fn send( + fn send_blob( _origin: Origin, _dest: Box, - _message: Box>, + _message: BoundedVec, ) -> Result { Ok(Default::default()) } } impl SendControllerWeightInfo for () { - fn send() -> Weight { + fn send_blob() -> Weight { Weight::zero() } } @@ -187,7 +192,7 @@ impl QueryController for () { _origin: Origin, _timeout: Timeout, _match_querier: VersionedLocation, - ) -> Result { + ) -> Result { Ok(Default::default()) } } diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index d1c397d0d881..195b77642a59 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -45,7 +45,7 @@ pub use barriers::{ mod controller; pub use controller::{ - Controller, ExecuteController, ExecuteControllerWeightInfo, QueryController, + Controller, ExecuteController, ExecuteControllerWeightInfo, MaxXcmEncodedSize, QueryController, QueryControllerWeightInfo, QueryHandler, SendController, SendControllerWeightInfo, }; diff --git a/polkadot/xcm/xcm-builder/src/pay.rs b/polkadot/xcm/xcm-builder/src/pay.rs index 3d8f9802bc86..978c6870cdaf 100644 --- a/polkadot/xcm/xcm-builder/src/pay.rs +++ b/polkadot/xcm/xcm-builder/src/pay.rs @@ -89,7 +89,7 @@ impl< type Beneficiary = Beneficiary; type AssetKind = AssetKind; type Balance = u128; - type Id = Querier::QueryId; + type Id = QueryId; type Error = xcm::latest::Error; fn pay( diff --git a/polkadot/xcm/xcm-builder/src/tests/barriers.rs b/polkadot/xcm/xcm-builder/src/tests/barriers.rs index 99a9dd5a6609..6516263f57a0 100644 --- a/polkadot/xcm/xcm-builder/src/tests/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/tests/barriers.rs @@ -309,3 +309,62 @@ fn suspension_should_work() { ); assert_eq!(r, Ok(())); } + +#[test] +fn allow_subscriptions_from_should_work() { + // allow only parent + AllowSubsFrom::set(vec![Location::parent()]); + + let valid_xcm_1 = Xcm::(vec![SubscribeVersion { + query_id: 42, + max_response_weight: Weight::from_parts(5000, 5000), + }]); + let valid_xcm_2 = Xcm::(vec![UnsubscribeVersion]); + let invalid_xcm_1 = Xcm::(vec![ + SetAppendix(Xcm(vec![])), + SubscribeVersion { query_id: 42, max_response_weight: Weight::from_parts(5000, 5000) }, + ]); + let invalid_xcm_2 = Xcm::(vec![ + SubscribeVersion { query_id: 42, max_response_weight: Weight::from_parts(5000, 5000) }, + SetTopic([0; 32]), + ]); + + let test_data = vec![ + ( + valid_xcm_1.clone(), + Parachain(1).into_location(), + // not allowed origin + Err(ProcessMessageError::Unsupported), + ), + (valid_xcm_1, Location::parent(), Ok(())), + ( + valid_xcm_2.clone(), + Parachain(1).into_location(), + // not allowed origin + 
Err(ProcessMessageError::Unsupported), + ), + (valid_xcm_2, Location::parent(), Ok(())), + ( + invalid_xcm_1, + Location::parent(), + // invalid XCM + Err(ProcessMessageError::BadFormat), + ), + ( + invalid_xcm_2, + Location::parent(), + // invalid XCM + Err(ProcessMessageError::BadFormat), + ), + ]; + + for (mut message, origin, expected_result) in test_data { + let r = AllowSubscriptionsFrom::>::should_execute( + &origin, + message.inner_mut(), + Weight::from_parts(10, 10), + &mut props(Weight::zero()), + ); + assert_eq!(r, expected_result, "Failed for origin: {origin:?} and message: {message:?}"); + } +} diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index d3b44522e4af..95edbd695c06 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -38,7 +38,7 @@ pub use frame_support::{ traits::{Contains, Get, IsInVec}, }; pub use parity_scale_codec::{Decode, Encode}; -pub use xcm::latest::{prelude::*, Weight}; +pub use xcm::latest::{prelude::*, QueryId, Weight}; use xcm_executor::traits::{Properties, QueryHandler, QueryResponseStatus}; pub use xcm_executor::{ traits::{ @@ -414,7 +414,6 @@ pub struct TestQueryHandler(core::marker::PhantomData<(T, BlockN impl QueryHandler for TestQueryHandler { - type QueryId = u64; type BlockNumber = BlockNumber; type Error = XcmError; type UniversalLocation = T::UniversalLocation; @@ -423,7 +422,7 @@ impl QueryHandler responder: impl Into, _timeout: Self::BlockNumber, _match_querier: impl Into, - ) -> Self::QueryId { + ) -> QueryId { let query_id = 1; expect_response(query_id, responder.into()); query_id @@ -433,7 +432,7 @@ impl QueryHandler message: &mut Xcm<()>, responder: impl Into, timeout: Self::BlockNumber, - ) -> Result { + ) -> Result { let responder = responder.into(); let destination = Self::UniversalLocation::get() .invert_target(&responder) @@ -445,7 +444,7 @@ impl QueryHandler Ok(query_id) } - fn take_response(query_id: Self::QueryId) -> QueryResponseStatus { + fn take_response(query_id: QueryId) -> QueryResponseStatus { QUERIES .with(|q| { q.borrow().get(&query_id).and_then(|v| match v { @@ -460,7 +459,7 @@ impl QueryHandler } #[cfg(feature = "runtime-benchmarks")] - fn expect_response(_id: Self::QueryId, _response: xcm::latest::Response) { + fn expect_response(_id: QueryId, _response: xcm::latest::Response) { // Unnecessary since it's only a test implementation } } @@ -743,6 +742,9 @@ impl Config for TestConfig { type SafeCallFilter = Everything; type Aliasers = AliasForeignAccountId32; type TransactionalProcessor = (); + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } pub fn fungible_multi_asset(location: Location, amount: u128) -> Asset { diff --git a/polkadot/xcm/xcm-builder/src/tests/mod.rs b/polkadot/xcm/xcm-builder/src/tests/mod.rs index e11caf6282be..63d254a10675 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mod.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mod.rs @@ -36,6 +36,7 @@ mod locking; mod origins; mod pay; mod querying; +mod routing; mod transacting; mod version_subscriptions; mod weight; diff --git a/polkadot/xcm/xcm-builder/src/tests/origins.rs b/polkadot/xcm/xcm-builder/src/tests/origins.rs index c717d1e2af8a..b6a1a9f1052a 100644 --- a/polkadot/xcm/xcm-builder/src/tests/origins.rs +++ b/polkadot/xcm/xcm-builder/src/tests/origins.rs @@ -80,8 +80,8 @@ fn universal_origin_should_work() { fn export_message_should_work() { // Bridge chain (assumed to be 
Relay) lets Parachain #1 have message execution for free. AllowUnpaidFrom::set(vec![[Parachain(1)].into()]); - // Local parachain #1 issues a transfer asset on Polkadot Relay-chain, transfering 100 Planck to - // Polkadot parachain #2. + // Local parachain #1 issues a transfer asset on Polkadot Relay-chain, transferring 100 Planck + // to Polkadot parachain #2. let expected_message = Xcm(vec![TransferAsset { assets: (Here, 100u128).into(), beneficiary: Parachain(2).into(), diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index 9feda8fb90b2..019113a12b2f 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -218,6 +218,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = (); + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } parameter_types! { diff --git a/polkadot/xcm/xcm-builder/src/tests/routing.rs b/polkadot/xcm/xcm-builder/src/tests/routing.rs new file mode 100644 index 000000000000..28117d647a08 --- /dev/null +++ b/polkadot/xcm/xcm-builder/src/tests/routing.rs @@ -0,0 +1,95 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use super::*; +use frame_support::{assert_ok, traits::Everything}; +use xcm_executor::traits::Properties; + +fn props() -> Properties { + Properties { weight_credit: Weight::zero(), message_id: None } +} + +#[test] +fn trailing_set_topic_as_id_with_unique_topic_should_work() { + type AllowSubscriptions = AllowSubscriptionsFrom; + + // check the validity of XCM for the `AllowSubscriptions` barrier + let valid_xcm = Xcm::<()>(vec![SubscribeVersion { + query_id: 42, + max_response_weight: Weight::from_parts(5000, 5000), + }]); + assert_eq!( + AllowSubscriptions::should_execute( + &Location::parent(), + valid_xcm.clone().inner_mut(), + Weight::from_parts(10, 10), + &mut props(), + ), + Ok(()) + ); + + // simulate sending `valid_xcm` with the `WithUniqueTopic` router + let mut sent_xcm = sp_io::TestExternalities::default().execute_with(|| { + assert_ok!(send_xcm::>(Location::parent(), valid_xcm,)); + sent_xcm() + }); + assert_eq!(1, sent_xcm.len()); + + // `sent_xcm` should contain `SubscribeVersion` and have `SetTopic` added + let mut sent_xcm = sent_xcm.remove(0).1; + let _ = sent_xcm + .0 + .matcher() + .assert_remaining_insts(2) + .expect("two instructions") + .match_next_inst(|instr| match instr { + SubscribeVersion { .. } => Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("expected instruction `SubscribeVersion`") + .match_next_inst(|instr| match instr { + SetTopic(..) 
=> Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("expected instruction `SetTopic`"); + + // `sent_xcm` contains `SetTopic` and is now invalid for `AllowSubscriptions` + assert_eq!( + AllowSubscriptions::should_execute( + &Location::parent(), + sent_xcm.clone().inner_mut(), + Weight::from_parts(10, 10), + &mut props(), + ), + Err(ProcessMessageError::BadFormat) + ); + + // let's apply `TrailingSetTopicAsId` before `AllowSubscriptions` + let mut props = props(); + assert!(props.message_id.is_none()); + + // should pass, and the `message_id` is set + assert_eq!( + TrailingSetTopicAsId::::should_execute( + &Location::parent(), + sent_xcm.clone().inner_mut(), + Weight::from_parts(10, 10), + &mut props, + ), + Ok(()) + ); + assert!(props.message_id.is_some()); +} diff --git a/polkadot/xcm/xcm-builder/src/tests/version_subscriptions.rs b/polkadot/xcm/xcm-builder/src/tests/version_subscriptions.rs index e29e3a546615..01047fde989f 100644 --- a/polkadot/xcm/xcm-builder/src/tests/version_subscriptions.rs +++ b/polkadot/xcm/xcm-builder/src/tests/version_subscriptions.rs @@ -27,16 +27,32 @@ fn simple_version_subscriptions_should_work() { ]); let mut hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(20, 20); - let r = XcmExecutor::::prepare_and_execute( - origin, - message, - &mut hash, - weight_limit, - Weight::zero(), + + // this case fails because the origin is not allowed + assert_eq!( + XcmExecutor::::prepare_and_execute( + origin, + message.clone(), + &mut hash, + weight_limit, + Weight::zero(), + ), + Outcome::Error { error: XcmError::Barrier } + ); + + // this case fails because the additional `SetAppendix` instruction is not allowed in the + // `AllowSubscriptionsFrom` + assert_eq!( + XcmExecutor::::prepare_and_execute( + Parent, + message, + &mut hash, + weight_limit, + Weight::zero(), + ), + Outcome::Error { error: XcmError::Barrier } ); - assert_eq!(r, Outcome::Error { error: XcmError::Barrier }); - let origin = Parachain(1000); let message = Xcm::(vec![SubscribeVersion { query_id: 42, max_response_weight: Weight::from_parts(5000, 5000), diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index b81a2f8539e9..731f5cbc00d4 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -204,6 +204,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = (); + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } pub type LocalOriginToLocation = SignedToAccountId32; diff --git a/polkadot/xcm/xcm-builder/tests/scenarios.rs b/polkadot/xcm/xcm-builder/tests/scenarios.rs index e22132d639d8..e5971b00b497 100644 --- a/polkadot/xcm/xcm-builder/tests/scenarios.rs +++ b/polkadot/xcm/xcm-builder/tests/scenarios.rs @@ -134,7 +134,7 @@ fn report_holding_works() { assets: AllCounted(1).into(), beneficiary: OnlyChild.into(), // invalid destination }, - // is not triggered becasue the deposit fails + // is not triggered because the deposit fails ReportHolding { response_info: response_info.clone(), assets: All.into() }, ]); let mut hash = fake_message_hash(&message); diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index 2c0825498265..b95d8f6098d8 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -13,7 +13,7 @@ workspace = true 
impl-trait-for-tuples = "0.2.2" environmental = { version = "1.1.4", default-features = false } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } xcm = { package = "staging-xcm", path = "..", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml index 1e572e6210a2..9c9c53f0ee1b 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml @@ -14,7 +14,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.6.1" } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system" } -futures = "0.3.21" +futures = "0.3.30" pallet-transaction-payment = { path = "../../../../substrate/frame/transaction-payment" } pallet-xcm = { path = "../../pallet-xcm" } polkadot-test-client = { path = "../../../node/test/client" } diff --git a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs index da7fc0d97825..1d1ee40d092c 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs @@ -328,7 +328,7 @@ fn query_response_elicits_handler() { /// Simulates a cross-chain message from Parachain to Parachain through Relay Chain /// that deposits assets into the reserve of the destination. -/// Regression test for `DepostiReserveAsset` changes in +/// Regression test for `DepositReserveAsset` changes in /// #[test] fn deposit_reserve_asset_works_for_any_xcm_sender() { diff --git a/polkadot/xcm/xcm-executor/src/config.rs b/polkadot/xcm/xcm-executor/src/config.rs index ebe532a42fd3..b296d32ca2ad 100644 --- a/polkadot/xcm/xcm-executor/src/config.rs +++ b/polkadot/xcm/xcm-executor/src/config.rs @@ -16,7 +16,8 @@ use crate::traits::{ AssetExchange, AssetLock, CallDispatcher, ClaimAssets, ConvertOrigin, DropAssets, ExportXcm, - FeeManager, OnResponse, ProcessTransaction, ShouldExecute, TransactAsset, + FeeManager, HandleHrmpChannelAccepted, HandleHrmpChannelClosing, + HandleHrmpNewChannelOpenRequest, OnResponse, ProcessTransaction, ShouldExecute, TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, }; use frame_support::{ @@ -114,4 +115,11 @@ pub trait Config { /// Transactional processor for XCM instructions. type TransactionalProcessor: ProcessTransaction; + + /// Allows optional logic execution for the `HrmpNewChannelOpenRequest` XCM notification. + type HrmpNewChannelOpenRequestHandler: HandleHrmpNewChannelOpenRequest; + /// Allows optional logic execution for the `HrmpChannelAccepted` XCM notification. + type HrmpChannelAcceptedHandler: HandleHrmpChannelAccepted; + /// Allows optional logic execution for the `HrmpChannelClosing` XCM notification. 
+ type HrmpChannelClosingHandler: HandleHrmpChannelClosing; } diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index 891e40a5fae6..f02d0c7f061d 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -35,7 +35,8 @@ use xcm::latest::prelude::*; pub mod traits; use traits::{ validate_export, AssetExchange, AssetLock, CallDispatcher, ClaimAssets, ConvertOrigin, - DropAssets, Enact, ExportXcm, FeeManager, FeeReason, OnResponse, ProcessTransaction, + DropAssets, Enact, ExportXcm, FeeManager, FeeReason, HandleHrmpChannelAccepted, + HandleHrmpChannelClosing, HandleHrmpNewChannelOpenRequest, OnResponse, ProcessTransaction, Properties, ShouldExecute, TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, XcmAssetTransfers, }; @@ -1216,9 +1217,21 @@ impl XcmExecutor { ); Ok(()) }, - HrmpNewChannelOpenRequest { .. } => Err(XcmError::Unimplemented), - HrmpChannelAccepted { .. } => Err(XcmError::Unimplemented), - HrmpChannelClosing { .. } => Err(XcmError::Unimplemented), + HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => + Config::TransactionalProcessor::process(|| { + Config::HrmpNewChannelOpenRequestHandler::handle( + sender, + max_message_size, + max_capacity, + ) + }), + HrmpChannelAccepted { recipient } => Config::TransactionalProcessor::process(|| { + Config::HrmpChannelAcceptedHandler::handle(recipient) + }), + HrmpChannelClosing { initiator, sender, recipient } => + Config::TransactionalProcessor::process(|| { + Config::HrmpChannelClosingHandler::handle(initiator, sender, recipient) + }), } } } diff --git a/polkadot/xcm/xcm-executor/src/traits/hrmp.rs b/polkadot/xcm/xcm-executor/src/traits/hrmp.rs new file mode 100644 index 000000000000..b6bbb9316d75 --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/traits/hrmp.rs @@ -0,0 +1,56 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use xcm::latest::Result as XcmResult; + +/// Executes logic when a `HrmpNewChannelOpenRequest` XCM notification is received. +pub trait HandleHrmpNewChannelOpenRequest { + fn handle(sender: u32, max_message_size: u32, max_capacity: u32) -> XcmResult; +} + +/// Executes optional logic when a `HrmpChannelAccepted` XCM notification is received. +pub trait HandleHrmpChannelAccepted { + fn handle(recipient: u32) -> XcmResult; +} + +/// Executes optional logic when a `HrmpChannelClosing` XCM notification is received. 
+pub trait HandleHrmpChannelClosing {
+    fn handle(initiator: u32, sender: u32, recipient: u32) -> XcmResult;
+}
+
+#[impl_trait_for_tuples::impl_for_tuples(30)]
+impl HandleHrmpNewChannelOpenRequest for Tuple {
+    fn handle(sender: u32, max_message_size: u32, max_capacity: u32) -> XcmResult {
+        for_tuples!( #( Tuple::handle(sender, max_message_size, max_capacity)?; )* );
+        Ok(())
+    }
+}
+
+#[impl_trait_for_tuples::impl_for_tuples(30)]
+impl HandleHrmpChannelAccepted for Tuple {
+    fn handle(recipient: u32) -> XcmResult {
+        for_tuples!( #( Tuple::handle(recipient)?; )* );
+        Ok(())
+    }
+}
+
+#[impl_trait_for_tuples::impl_for_tuples(30)]
+impl HandleHrmpChannelClosing for Tuple {
+    fn handle(initiator: u32, sender: u32, recipient: u32) -> XcmResult {
+        for_tuples!( #( Tuple::handle(initiator, sender, recipient)?; )* );
+        Ok(())
+    }
+}
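These tuple implementations chain handlers: each element runs in turn and every one must succeed. As an illustration of what a runtime could plug into the new executor `Config` types introduced in this PR, a hedged sketch of a custom handler; `CapMessageSize` and its size cap are invented for this example:

```rust
use xcm::latest::{Error as XcmError, Result as XcmResult};
use xcm_executor::traits::HandleHrmpNewChannelOpenRequest;

/// Illustrative handler (not part of this PR): reject channel open requests
/// whose message size exceeds a cap, accept everything else as a no-op.
pub struct CapMessageSize;
impl HandleHrmpNewChannelOpenRequest for CapMessageSize {
    fn handle(_sender: u32, max_message_size: u32, _max_capacity: u32) -> XcmResult {
        if max_message_size > 102_400 {
            return Err(XcmError::NoPermission)
        }
        Ok(())
    }
}
```

Thanks to the tuple impls above, such a handler composes with others, e.g. `type HrmpNewChannelOpenRequestHandler = (CapMessageSize, SomeOtherHandler);`, and the instruction only succeeds if every element returns `Ok(())`.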
diff --git a/polkadot/xcm/xcm-executor/src/traits/mod.rs b/polkadot/xcm/xcm-executor/src/traits/mod.rs
index b445e84d3912..aa3f0d26e302 100644
--- a/polkadot/xcm/xcm-executor/src/traits/mod.rs
+++ b/polkadot/xcm/xcm-executor/src/traits/mod.rs
@@ -45,6 +45,10 @@ mod should_execute;
 pub use should_execute::{CheckSuspension, Properties, ShouldExecute};
 mod transact_asset;
 pub use transact_asset::TransactAsset;
+mod hrmp;
+pub use hrmp::{
+    HandleHrmpChannelAccepted, HandleHrmpChannelClosing, HandleHrmpNewChannelOpenRequest,
+};
 mod weight;
 #[deprecated = "Use `sp_runtime::traits::` instead"]
 pub use sp_runtime::traits::{Identity, TryConvertInto as JustTry};
diff --git a/polkadot/xcm/xcm-executor/src/traits/on_response.rs b/polkadot/xcm/xcm-executor/src/traits/on_response.rs
index 447bbc94930a..b2bf8047ebf7 100644
--- a/polkadot/xcm/xcm-executor/src/traits/on_response.rs
+++ b/polkadot/xcm/xcm-executor/src/traits/on_response.rs
@@ -17,10 +17,10 @@
 use crate::{Junctions::Here, Xcm};
 use core::{fmt::Debug, result};
 use frame_support::{
-    pallet_prelude::{Get, TypeInfo},
+    pallet_prelude::Get,
     parameter_types,
 };
-use parity_scale_codec::{Decode, Encode, FullCodec, MaxEncodedLen};
+use parity_scale_codec::{Decode, Encode};
 use sp_arithmetic::traits::Zero;
 use xcm::latest::{
     Error as XcmError, InteriorLocation, Location, QueryId, Response, Result as XcmResult, Weight,
 };
@@ -114,15 +114,6 @@ pub enum QueryResponseStatus {
 
 /// Provides methods to expect responses from XCMs and query their status.
 pub trait QueryHandler {
-    type QueryId: From<u64>
-        + FullCodec
-        + MaxEncodedLen
-        + TypeInfo
-        + Clone
-        + Eq
-        + PartialEq
-        + Debug
-        + Copy;
     type BlockNumber: Zero + Encode;
     type Error;
     type UniversalLocation: Get<InteriorLocation>;
@@ -150,14 +141,14 @@ pub trait QueryHandler {
         message: &mut Xcm<()>,
         responder: impl Into<Location>,
         timeout: Self::BlockNumber,
-    ) -> result::Result<Self::QueryId, Self::Error>;
+    ) -> result::Result<QueryId, Self::Error>;
 
     /// Attempt to remove and return the response of query with ID `query_id`.
-    fn take_response(id: Self::QueryId) -> QueryResponseStatus<Self::BlockNumber>;
+    fn take_response(id: QueryId) -> QueryResponseStatus<Self::BlockNumber>;
 
     /// Makes sure to expect a response with the given id.
     #[cfg(feature = "runtime-benchmarks")]
-    fn expect_response(id: Self::QueryId, response: Response);
+    fn expect_response(id: QueryId, response: Response);
 }
 
 parameter_types! {
@@ -167,17 +158,16 @@ parameter_types! {
 impl QueryHandler for () {
     type BlockNumber = u64;
     type Error = ();
-    type QueryId = u64;
     type UniversalLocation = UniversalLocation;
 
-    fn take_response(_query_id: Self::QueryId) -> QueryResponseStatus<Self::BlockNumber> {
+    fn take_response(_query_id: QueryId) -> QueryResponseStatus<Self::BlockNumber> {
         QueryResponseStatus::NotFound
     }
     fn new_query(
         _responder: impl Into<Location>,
         _timeout: Self::BlockNumber,
         _match_querier: impl Into<Location>,
-    ) -> Self::QueryId {
+    ) -> QueryId {
         0u64
     }
@@ -185,10 +175,10 @@ impl QueryHandler for () {
         _message: &mut Xcm<()>,
         _responder: impl Into<Location>,
         _timeout: Self::BlockNumber,
-    ) -> Result<Self::QueryId, Self::Error> {
+    ) -> Result<QueryId, Self::Error> {
         Err(())
     }
 
     #[cfg(feature = "runtime-benchmarks")]
-    fn expect_response(_id: Self::QueryId, _response: crate::Response) {}
+    fn expect_response(_id: QueryId, _response: crate::Response) {}
 }
diff --git a/polkadot/xcm/xcm-executor/src/traits/should_execute.rs b/polkadot/xcm/xcm-executor/src/traits/should_execute.rs
index e952d112b0c9..584331822120 100644
--- a/polkadot/xcm/xcm-executor/src/traits/should_execute.rs
+++ b/polkadot/xcm/xcm-executor/src/traits/should_execute.rs
@@ -17,7 +17,7 @@
 use frame_support::traits::ProcessMessageError;
 use xcm::latest::{Instruction, Location, Weight, XcmHash};
 
-/// Properyies of an XCM message and its imminent execution.
+/// Properties of an XCM message and its imminent execution.
 #[derive(Clone, Eq, PartialEq, Debug)]
 pub struct Properties {
     /// The amount of weight that the system has determined this
@@ -32,9 +32,9 @@ pub struct Properties {
 /// Trait to determine whether the execution engine should actually execute a given XCM.
 ///
 /// Can be amalgamated into a tuple to have multiple trials. If any of the tuple elements returns
-/// `Ok()`, the execution stops. Else, `Err(_)` is returned if all elements reject the message.
+/// `Ok(())`, the execution stops. Else, `Err(_)` is returned if all elements reject the message.
 pub trait ShouldExecute {
-    /// Returns `true` if the given `message` may be executed.
+    /// Returns `Ok(())` if the given `message` may be executed.
     ///
     /// - `origin`: The origin (sender) of the message.
     /// - `instructions`: The message itself.
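The corrected documentation above now says implementations return `Ok(())` rather than `true`. For orientation, a minimal sketch of a custom barrier, assuming the trait shape exercised by the tests in this PR (the `AllowParentOnly` type is illustrative, not part of the codebase):

```rust
use frame_support::traits::ProcessMessageError;
use xcm::latest::{Instruction, Location, Weight};
use xcm_executor::traits::{Properties, ShouldExecute};

/// Illustrative barrier: only lets messages sent by the parent through.
pub struct AllowParentOnly;
impl ShouldExecute for AllowParentOnly {
    fn should_execute<RuntimeCall>(
        origin: &Location,
        _instructions: &mut [Instruction<RuntimeCall>],
        _max_weight: Weight,
        _properties: &mut Properties,
    ) -> Result<(), ProcessMessageError> {
        if origin == &Location::parent() {
            Ok(())
        } else {
            // Rejecting lets the next barrier in an amalgamated tuple try instead.
            Err(ProcessMessageError::Unsupported)
        }
    }
}
```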
diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml b/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml new file mode 100644 index 000000000000..30c7c0bac14f --- /dev/null +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "xcm-fee-payment-runtime-api" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +repository.workspace = true +description = "XCM fee payment runtime API" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", +] } + +sp-api = { path = "../../../substrate/primitives/api", default-features = false } +scale-info = { version = "2.11.1", default-features = false, features = [ + "derive", + "serde", +] } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-weights = { path = "../../../substrate/primitives/weights", default-features = false } +xcm = { package = "staging-xcm", path = "../", default-features = false } +frame-support = { path = "../../../substrate/frame/support", default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "scale-info/std", + "sp-api/std", + "sp-runtime/std", + "sp-std/std", + "sp-weights/std", + "xcm/std", +] diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs b/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs new file mode 100644 index 000000000000..20bf9236f1fb --- /dev/null +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs @@ -0,0 +1,99 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Runtime API definition for xcm transaction payment. + +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::{Decode, Encode}; +use frame_support::pallet_prelude::TypeInfo; +use sp_std::vec::Vec; +use sp_weights::Weight; +use xcm::{Version, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; + +sp_api::decl_runtime_apis! { + /// A trait of XCM payment API. + /// + /// API provides functionality for obtaining: + /// + /// * the weight required to execute an XCM message, + /// * a list of acceptable `AssetId`s for message execution payment, + /// * the cost of the weight in the specified acceptable `AssetId`. + /// * the fees for an XCM message delivery. + /// + /// To determine the execution weight of the calls required for + /// [`xcm::latest::Instruction::Transact`] instruction, `TransactionPaymentCallApi` can be used. + pub trait XcmPaymentApi { + /// Returns a list of acceptable payment assets. + /// + /// # Arguments + /// + /// * `xcm_version`: Version. 
+        fn query_acceptable_payment_assets(xcm_version: Version) -> Result<Vec<VersionedAssetId>, Error>;
+
+        /// Returns a weight needed to execute an XCM.
+        ///
+        /// # Arguments
+        ///
+        /// * `message`: `VersionedXcm`.
+        fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, Error>;
+
+        /// Converts a weight into a fee for the specified `AssetId`.
+        ///
+        /// # Arguments
+        ///
+        /// * `weight`: convertible `Weight`.
+        /// * `asset`: `VersionedAssetId`.
+        fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, Error>;
+
+        /// Get delivery fees for sending a specific `message` to a `destination`.
+        /// These always come in a specific asset, defined by the chain.
+        ///
+        /// # Arguments
+        /// * `message`: The message that'll be sent, necessary because most delivery fees are based on the
+        ///   size of the message.
+        /// * `destination`: The destination to send the message to. Different destinations may use
+        ///   different senders that charge different fees.
+        fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, Error>;
+    }
+}
+
+#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)]
+pub enum Error {
+    /// An API part is unsupported.
+    #[codec(index = 0)]
+    Unimplemented,
+
+    /// Converting a versioned data structure from one version to another failed.
+    #[codec(index = 1)]
+    VersionedConversionFailed,
+
+    /// XCM message weight calculation failed.
+    #[codec(index = 2)]
+    WeightNotComputable,
+
+    /// XCM version not able to be handled.
+    #[codec(index = 3)]
+    UnhandledXcmVersion,
+
+    /// The given asset is not handled as a fee asset.
+    #[codec(index = 4)]
+    AssetNotFound,
+
+    /// Destination is known to be unroutable.
+    #[codec(index = 5)]
+    Unroutable,
+}
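A hedged sketch of how a runtime might implement this API inside its `impl_runtime_apis!` block. `Runtime`, `Block`, and `XcmWeigher` are placeholders (the latter standing for some `WeightBounds<()>` implementation); fee and delivery-fee computation are runtime-specific, so they are left unimplemented here:

```rust
use sp_weights::Weight;
use xcm::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm};
use xcm_executor::traits::WeightBounds;
use xcm_fee_payment_runtime_api::{Error as XcmPaymentApiError, XcmPaymentApi};

impl XcmPaymentApi<Block> for Runtime {
    fn query_acceptable_payment_assets(
        _xcm_version: xcm::Version,
    ) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> {
        // e.g. accept only the native asset (Here) for fee payment
        Ok(vec![VersionedAssetId::V4(xcm::v4::AssetId(xcm::v4::Location::here()))])
    }

    fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, XcmPaymentApiError> {
        // Convert to the latest version, then weigh with the placeholder weigher.
        let mut message: xcm::v4::Xcm<()> =
            message.try_into().map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?;
        XcmWeigher::weight(&mut message).map_err(|_| XcmPaymentApiError::WeightNotComputable)
    }

    fn query_weight_to_asset_fee(
        _weight: Weight,
        _asset: VersionedAssetId,
    ) -> Result<u128, XcmPaymentApiError> {
        // Weight-to-fee conversion depends on the runtime's fee model.
        Err(XcmPaymentApiError::Unimplemented)
    }

    fn query_delivery_fees(
        _destination: VersionedLocation,
        _message: VersionedXcm<()>,
    ) -> Result<VersionedAssets, XcmPaymentApiError> {
        // Depends on the configured router and its delivery-fee scheme.
        Err(XcmPaymentApiError::Unimplemented)
    }
}
```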
diff --git a/polkadot/xcm/xcm-simulator/example/Cargo.toml b/polkadot/xcm/xcm-simulator/example/Cargo.toml
index 38c32680f631..9578a3de984a 100644
--- a/polkadot/xcm/xcm-simulator/example/Cargo.toml
+++ b/polkadot/xcm/xcm-simulator/example/Cargo.toml
@@ -11,7 +11,7 @@ workspace = true
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1" }
-scale-info = { version = "2.10.0", features = ["derive"] }
+scale-info = { version = "2.11.1", features = ["derive"] }
 log = { workspace = true }
 
 frame-system = { path = "../../../../substrate/frame/system" }
diff --git a/polkadot/xcm/xcm-simulator/example/src/lib.rs b/polkadot/xcm/xcm-simulator/example/src/lib.rs
index d99728b3f39a..8bf0aab4aed2 100644
--- a/polkadot/xcm/xcm-simulator/example/src/lib.rs
+++ b/polkadot/xcm/xcm-simulator/example/src/lib.rs
@@ -252,12 +252,13 @@ mod tests {
         let withdraw_amount = 123;
 
         Relay::execute_with(|| {
-            assert_ok!(RelayChainPalletXcm::reserve_transfer_assets(
+            assert_ok!(RelayChainPalletXcm::limited_reserve_transfer_assets(
                 relay_chain::RuntimeOrigin::signed(ALICE),
                 Box::new(Parachain(1).into()),
                 Box::new(AccountId32 { network: None, id: ALICE.into() }.into()),
                 Box::new((Here, withdraw_amount).into()),
                 0,
+                Unlimited,
             ));
             assert_eq!(
                 relay_chain::Balances::free_balance(&child_account_id(1)),
@@ -426,7 +427,7 @@ mod tests {
     /// Scenario:
     /// The relay-chain transfers an NFT into a parachain's sovereign account, who then mints a
-    /// trustless-backed-derivated locally.
+    /// trustless-backed derivative locally.
     ///
     /// Asserts that the parachain accounts are updated as expected.
     #[test]
@@ -481,7 +482,7 @@ mod tests {
             assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message));
         });
 
         ParaA::execute_with(|| {
-            log::debug!(target: "xcm-exceutor", "Hello");
+            log::debug!(target: "xcm-executor", "Hello");
             assert_eq!(
                 parachain::ForeignUniques::owner((Parent, GeneralIndex(2)).into(), 69u32.into()),
                 Some(ALICE),
diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain.rs b/polkadot/xcm/xcm-simulator/example/src/parachain.rs
index 3993e15058e2..80ba8452a8e7 100644
--- a/polkadot/xcm/xcm-simulator/example/src/parachain.rs
+++ b/polkadot/xcm/xcm-simulator/example/src/parachain.rs
@@ -250,6 +250,9 @@ impl Config for XcmConfig {
     type SafeCallFilter = Everything;
     type Aliasers = Nothing;
     type TransactionalProcessor = FrameTransactionalProcessor;
+    type HrmpNewChannelOpenRequestHandler = ();
+    type HrmpChannelAcceptedHandler = ();
+    type HrmpChannelClosingHandler = ();
 }
 
 #[frame_support::pallet]
diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs
index f41e273839b4..286d0038e187 100644
--- a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs
+++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs
@@ -198,6 +198,9 @@ impl Config for XcmConfig {
     type SafeCallFilter = Everything;
     type Aliasers = Nothing;
     type TransactionalProcessor = FrameTransactionalProcessor;
+    type HrmpNewChannelOpenRequestHandler = ();
+    type HrmpChannelAcceptedHandler = ();
+    type HrmpChannelClosingHandler = ();
 }
 
 pub type LocalOriginToLocation = SignedToAccountId32;
@@ -272,6 +275,7 @@ impl pallet_message_queue::Config for Runtime {
     type HeapSize = MessageQueueHeapSize;
     type MaxStale = MessageQueueMaxStale;
     type ServiceWeight = MessageQueueServiceWeight;
+    type IdleMaxServiceWeight = ();
     type MessageProcessor = MessageProcessor;
     type QueueChangeHandler = ();
     type QueuePausedQuery = ();
diff --git a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml
index f1ec04c6c8ef..76b7df1ce178 100644
--- a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml
+++ b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml
@@ -14,7 +14,7 @@ workspace = true
 codec = { package = "parity-scale-codec", version = "3.6.1" }
 honggfuzz = "0.5.55"
 arbitrary = "1.3.2"
-scale-info = { version = "2.10.0", features = ["derive"] }
+scale-info = { version = "2.11.1", features = ["derive"] }
 
 frame-system = { path = "../../../../substrate/frame/system" }
 frame-support = { path = "../../../../substrate/frame/support" }
diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs
index 170a0242d361..31c3b59e04a4 100644
--- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs
+++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs
@@ -156,6 +156,9 @@ impl Config for XcmConfig {
     type SafeCallFilter = Everything;
     type Aliasers = Nothing;
     type TransactionalProcessor = FrameTransactionalProcessor;
+ type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } pub type LocalOriginToLocation = SignedToAccountId32; @@ -231,6 +234,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = MessageQueueHeapSize; type MaxStale = MessageQueueMaxStale; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); #[cfg(not(feature = "runtime-benchmarks"))] type MessageProcessor = MessageProcessor; #[cfg(feature = "runtime-benchmarks")] diff --git a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl index 62d5a9768f9e..d1ed0250d4da 100644 --- a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl +++ b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl @@ -21,9 +21,9 @@ honest: reports polkadot_parachain_candidate_dispute_concluded{validity="valid"} honest: reports polkadot_parachain_candidate_dispute_concluded{validity="invalid"} is 0 within 100 seconds # Check lag - approval -honest: reports polkadot_parachain_approval_checking_finality_lag is 0 +honest: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 # Check lag - dispute conclusion -honest: reports polkadot_parachain_disputes_finality_lag is 0 +honest: reports polkadot_parachain_disputes_finality_lag is lower than 2 diff --git a/prdoc/.template.prdoc b/prdoc/.template.prdoc index 097741f388c4..03a458876dfa 100644 --- a/prdoc/.template.prdoc +++ b/prdoc/.template.prdoc @@ -4,7 +4,7 @@ title: ... doc: - - audience: Node Dev + - audience: ... description: | ... diff --git a/prdoc/pr_1378.prdoc b/prdoc/1.9.0/pr_1378.prdoc similarity index 100% rename from prdoc/pr_1378.prdoc rename to prdoc/1.9.0/pr_1378.prdoc diff --git a/prdoc/pr_1554.prdoc b/prdoc/1.9.0/pr_1554.prdoc similarity index 100% rename from prdoc/pr_1554.prdoc rename to prdoc/1.9.0/pr_1554.prdoc diff --git a/prdoc/pr_1781.prdoc b/prdoc/1.9.0/pr_1781.prdoc similarity index 100% rename from prdoc/pr_1781.prdoc rename to prdoc/1.9.0/pr_1781.prdoc diff --git a/prdoc/pr_2393.prdoc b/prdoc/1.9.0/pr_2393.prdoc similarity index 100% rename from prdoc/pr_2393.prdoc rename to prdoc/1.9.0/pr_2393.prdoc diff --git a/prdoc/pr_3002.prdoc b/prdoc/1.9.0/pr_3002.prdoc similarity index 100% rename from prdoc/pr_3002.prdoc rename to prdoc/1.9.0/pr_3002.prdoc diff --git a/prdoc/pr_3187.prdoc b/prdoc/1.9.0/pr_3187.prdoc similarity index 100% rename from prdoc/pr_3187.prdoc rename to prdoc/1.9.0/pr_3187.prdoc diff --git a/prdoc/pr_3231.prdoc b/prdoc/1.9.0/pr_3231.prdoc similarity index 100% rename from prdoc/pr_3231.prdoc rename to prdoc/1.9.0/pr_3231.prdoc diff --git a/prdoc/pr_3233.prdoc b/prdoc/1.9.0/pr_3233.prdoc similarity index 100% rename from prdoc/pr_3233.prdoc rename to prdoc/1.9.0/pr_3233.prdoc diff --git a/prdoc/pr_3324.prdoc b/prdoc/1.9.0/pr_3324.prdoc similarity index 100% rename from prdoc/pr_3324.prdoc rename to prdoc/1.9.0/pr_3324.prdoc diff --git a/prdoc/pr_3371.prdoc b/prdoc/1.9.0/pr_3371.prdoc similarity index 100% rename from prdoc/pr_3371.prdoc rename to prdoc/1.9.0/pr_3371.prdoc diff --git a/prdoc/pr_3377.prdoc b/prdoc/1.9.0/pr_3377.prdoc similarity index 100% rename from prdoc/pr_3377.prdoc rename to prdoc/1.9.0/pr_3377.prdoc diff --git a/prdoc/pr_3378.prdoc b/prdoc/1.9.0/pr_3378.prdoc similarity index 100% rename from prdoc/pr_3378.prdoc rename to prdoc/1.9.0/pr_3378.prdoc diff --git a/prdoc/pr_3403.prdoc 
b/prdoc/1.9.0/pr_3403.prdoc similarity index 100% rename from prdoc/pr_3403.prdoc rename to prdoc/1.9.0/pr_3403.prdoc diff --git a/prdoc/pr_3411.prdoc b/prdoc/1.9.0/pr_3411.prdoc similarity index 100% rename from prdoc/pr_3411.prdoc rename to prdoc/1.9.0/pr_3411.prdoc diff --git a/prdoc/pr_3412.prdoc b/prdoc/1.9.0/pr_3412.prdoc similarity index 100% rename from prdoc/pr_3412.prdoc rename to prdoc/1.9.0/pr_3412.prdoc diff --git a/prdoc/pr_3447.prdoc b/prdoc/1.9.0/pr_3447.prdoc similarity index 100% rename from prdoc/pr_3447.prdoc rename to prdoc/1.9.0/pr_3447.prdoc diff --git a/prdoc/pr_3453.prdoc b/prdoc/1.9.0/pr_3453.prdoc similarity index 100% rename from prdoc/pr_3453.prdoc rename to prdoc/1.9.0/pr_3453.prdoc diff --git a/prdoc/pr_3454.prdoc b/prdoc/1.9.0/pr_3454.prdoc similarity index 100% rename from prdoc/pr_3454.prdoc rename to prdoc/1.9.0/pr_3454.prdoc diff --git a/prdoc/pr_3456.prdoc b/prdoc/1.9.0/pr_3456.prdoc similarity index 100% rename from prdoc/pr_3456.prdoc rename to prdoc/1.9.0/pr_3456.prdoc diff --git a/prdoc/pr_3460.prdoc b/prdoc/1.9.0/pr_3460.prdoc similarity index 100% rename from prdoc/pr_3460.prdoc rename to prdoc/1.9.0/pr_3460.prdoc diff --git a/prdoc/pr_3491.prdoc b/prdoc/1.9.0/pr_3491.prdoc similarity index 100% rename from prdoc/pr_3491.prdoc rename to prdoc/1.9.0/pr_3491.prdoc diff --git a/prdoc/pr_3504.prdoc b/prdoc/1.9.0/pr_3504.prdoc similarity index 100% rename from prdoc/pr_3504.prdoc rename to prdoc/1.9.0/pr_3504.prdoc diff --git a/prdoc/pr_3505.prdoc b/prdoc/1.9.0/pr_3505.prdoc similarity index 100% rename from prdoc/pr_3505.prdoc rename to prdoc/1.9.0/pr_3505.prdoc diff --git a/prdoc/pr_3510.prdoc b/prdoc/1.9.0/pr_3510.prdoc similarity index 100% rename from prdoc/pr_3510.prdoc rename to prdoc/1.9.0/pr_3510.prdoc diff --git a/prdoc/pr_3513.prdoc b/prdoc/1.9.0/pr_3513.prdoc similarity index 100% rename from prdoc/pr_3513.prdoc rename to prdoc/1.9.0/pr_3513.prdoc diff --git a/prdoc/pr_3523.prdoc b/prdoc/1.9.0/pr_3523.prdoc similarity index 100% rename from prdoc/pr_3523.prdoc rename to prdoc/1.9.0/pr_3523.prdoc diff --git a/prdoc/pr_3532.prdoc b/prdoc/1.9.0/pr_3532.prdoc similarity index 100% rename from prdoc/pr_3532.prdoc rename to prdoc/1.9.0/pr_3532.prdoc diff --git a/prdoc/pr_3540.prdoc b/prdoc/1.9.0/pr_3540.prdoc similarity index 100% rename from prdoc/pr_3540.prdoc rename to prdoc/1.9.0/pr_3540.prdoc diff --git a/prdoc/pr_3574.prdoc b/prdoc/1.9.0/pr_3574.prdoc similarity index 100% rename from prdoc/pr_3574.prdoc rename to prdoc/1.9.0/pr_3574.prdoc diff --git a/prdoc/pr_3589.prdoc b/prdoc/1.9.0/pr_3589.prdoc similarity index 100% rename from prdoc/pr_3589.prdoc rename to prdoc/1.9.0/pr_3589.prdoc diff --git a/prdoc/pr_3606.prdoc b/prdoc/1.9.0/pr_3606.prdoc similarity index 100% rename from prdoc/pr_3606.prdoc rename to prdoc/1.9.0/pr_3606.prdoc diff --git a/prdoc/pr_3636.prdoc b/prdoc/1.9.0/pr_3636.prdoc similarity index 100% rename from prdoc/pr_3636.prdoc rename to prdoc/1.9.0/pr_3636.prdoc diff --git a/prdoc/pr_3639.prdoc b/prdoc/1.9.0/pr_3639.prdoc similarity index 100% rename from prdoc/pr_3639.prdoc rename to prdoc/1.9.0/pr_3639.prdoc diff --git a/prdoc/pr_3643.prdoc b/prdoc/1.9.0/pr_3643.prdoc similarity index 100% rename from prdoc/pr_3643.prdoc rename to prdoc/1.9.0/pr_3643.prdoc diff --git a/prdoc/pr_3665.prdoc b/prdoc/1.9.0/pr_3665.prdoc similarity index 100% rename from prdoc/pr_3665.prdoc rename to prdoc/1.9.0/pr_3665.prdoc diff --git a/prdoc/pr_3190.prdoc b/prdoc/pr_3190.prdoc new file mode 100644 index 000000000000..2f7a89a0b1ab --- 
/dev/null +++ b/prdoc/pr_3190.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix algorithmic complexity of the on-demand scheduler. + +doc: + - audience: Runtime Dev + description: | + Improves on-demand performance by a significant factor. Previously, having many on-demand cores + would cause very poor block times, because the full order queue was processed for each core. + This allows for increasing the max size of the on-demand queue if needed. + + At the same time, the spot price for on-demand is now checked prior to every order, ensuring + that economic backpressure will be applied. + +crates: + - name: polkadot-runtime-parachains diff --git a/prdoc/pr_3246.prdoc b/prdoc/pr_3246.prdoc new file mode 100644 index 000000000000..19a823e50285 --- /dev/null +++ b/prdoc/pr_3246.prdoc @@ -0,0 +1,11 @@ +title: Try State Hook for Beefy. + +doc: + - audience: Runtime User + description: | + Invariants for storage items in the beefy pallet. Enforces the following invariants: + 1. `Authorities` should not exceed the `MaxAuthorities` capacity. + 2. `NextAuthorities` should not exceed the `MaxAuthorities` capacity. + 3. `ValidatorSetId` must be present in `SetIdSession`. +crates: +- name: pallet-beefy diff --git a/prdoc/pr_3341.prdoc b/prdoc/pr_3341.prdoc new file mode 100644 index 000000000000..de714fa5a1e5 --- /dev/null +++ b/prdoc/pr_3341.prdoc @@ -0,0 +1,18 @@ +title: "Fix `schedule_code_upgrade` when called by the owner/root" + +doc: + - audience: Runtime User + description: | + Fixes `schedule_code_upgrade` when being used by the owner/root. The call is used for + manually upgrading the validation code of a parachain on the relay chain. It was failing + before because the relay chain waited for the parachain to make progress. However, this + call is mostly used when a parachain is bricked, which means that it is no longer able + to build any blocks. The fix is to schedule the validation code upgrade and then + to enact it at the scheduled block. The enactment now happens without requiring the parachain + to make any progress. + +crates: + - name: polkadot-runtime-common + bump: minor + - name: polkadot-runtime-parachains + bump: major diff --git a/prdoc/pr_3438.prdoc b/prdoc/pr_3438.prdoc new file mode 100644 index 000000000000..5f4a0e3d57af --- /dev/null +++ b/prdoc/pr_3438.prdoc @@ -0,0 +1,13 @@ +title: "Pools: Make PermissionlessWithdraw the default claim permission" + +doc: + - audience: Runtime User + description: | + Makes permissionless withdrawing the default claim permission, giving any network participant + access to claim pool rewards on members' behalf, by default. + +crates: + - name: pallet-nomination-pools + bump: minor + - name: pallet-nomination-pools-benchmarking + bump: minor diff --git a/prdoc/pr_3479.prdoc b/prdoc/pr_3479.prdoc new file mode 100644 index 000000000000..1e44ce5646b9 --- /dev/null +++ b/prdoc/pr_3479.prdoc @@ -0,0 +1,8 @@ +title: "Elastic scaling: runtime dependency tracking and enactment" + +doc: + - audience: Node Dev + description: | + Adds support in the inclusion and paras_inherent runtime modules for backing and including multiple candidates of the same para if they form a chain.
+ +crates: [ ] diff --git a/prdoc/pr_3521.prdoc b/prdoc/pr_3521.prdoc new file mode 100644 index 000000000000..4ad3f03bf0c5 --- /dev/null +++ b/prdoc/pr_3521.prdoc @@ -0,0 +1,12 @@ +title: Collator side changes for elastic scaling + +doc: + - audience: Node Dev + description: | + Parachain teams wishing to utilize the benefits of + elastic scaling will need to upgrade their collator + code to include these changes. + +crates: +- name: polkadot-collator-protocol + bump: minor diff --git a/prdoc/pr_3580.prdoc b/prdoc/pr_3580.prdoc new file mode 100644 index 000000000000..042fcf7a1a84 --- /dev/null +++ b/prdoc/pr_3580.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Expose `ClaimQueue` via a runtime api and consume it in `collation-generation` + +doc: + - audience: Node Dev + description: | + Creates a new runtime API exposing the `ClaimQueue` from the `scheduler` pallet. Consumes the API + in collation generation (if available) by getting what's scheduled on a core from the + `ClaimQueue` instead of from `next_up_on_available` (from the `AvailabilityCores` runtime API). + +crates: [ ] diff --git a/prdoc/pr_3607.prdoc b/prdoc/pr_3607.prdoc new file mode 100644 index 000000000000..1a69b25ad255 --- /dev/null +++ b/prdoc/pr_3607.prdoc @@ -0,0 +1,26 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "XCM fee payment API" + +doc: + - audience: Runtime Dev + description: | + A runtime API was added for estimating the fees required for XCM execution and delivery. + This is the basic building block needed for UIs to accurately estimate fees. + An example implementation is shown in the PR. Ideally it's simple to implement: you only need to call existing parts of your XCM config. + The API looks like so: + ```rust + fn query_acceptable_payment_assets(xcm_version: Version) -> Result<Vec<VersionedAssetId>, Error>; + fn query_xcm_weight(message: VersionedXcm<Call>) -> Result<Weight, Error>; + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, Error>; + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, Error>; + ``` + The first three relate to XCM execution fees: given an XCM, you can query its weight, then query which assets are acceptable for buying weight, and convert weight to an amount of those assets. + The last one takes in a destination and a message you want to send from the runtime you're executing this on; it will give you the delivery fees. + +crates: + - name: xcm-fee-payment-runtime-api + - name: rococo-runtime + - name: westend-runtime + diff --git a/prdoc/pr_3616.prdoc b/prdoc/pr_3616.prdoc new file mode 100644 index 000000000000..fcf068dcd173 --- /dev/null +++ b/prdoc/pr_3616.prdoc @@ -0,0 +1,28 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Benchmarking pallet V2 syntax extension: pov_mode attribute" + +doc: + - audience: Runtime Dev + description: | + Adds the `pov_mode` attribute from the V1 benchmarking syntax to the V2 syntax. This allows + overriding the default PoV mode (`MaxEncodedLen`) to either `Measured` or `Ignored`. It can be + overridden for a whole benchmark, a key prefix, or a specific key itself.
+ + Example syntax looks like this: + ```rust + #[benchmark(pov_mode = Measured { + Pallet: Measured, + Pallet::Storage: MaxEncodedLen, + })] + fn do_some() { + .. + } + ``` + +crates: + - name: frame-support-procedural + bump: minor + - name: frame-support + bump: minor diff --git a/prdoc/pr_3696.prdoc b/prdoc/pr_3696.prdoc new file mode 100644 index 000000000000..f3371d1734ad --- /dev/null +++ b/prdoc/pr_3696.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add HRMP notification handlers to the xcm-executor + +doc: + - audience: Runtime Dev + description: | + Adds optional HRMP notification handlers to the xcm-executor. These handlers are 3 new config types on the xcm-executor `Config` trait: + - `HrmpNewChannelOpenRequestHandler` + - `HrmpChannelAcceptedHandler` + - `HrmpChannelClosingHandler` + + The traits of these config types are implemented on tuples, and on `()` for the default case. + +crates: + - name: staging-xcm-executor diff --git a/prdoc/pr_3706.prdoc b/prdoc/pr_3706.prdoc new file mode 100644 index 000000000000..edeb08241bed --- /dev/null +++ b/prdoc/pr_3706.prdoc @@ -0,0 +1,20 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Extrinsic to restore corrupted staking ledgers + +doc: + - audience: Runtime User + description: | + This PR adds a new extrinsic `Call::restore_ledger` gated by `StakingAdmin` origin that restores a corrupted staking ledger. This extrinsic will be used to recover ledgers that were affected by the issue discussed in https://github.com/paritytech/polkadot-sdk/issues/3245. + The extrinsic will re-write the storage items associated with a stash account provided as an input parameter. The data used to reset the ledger can be either i) fetched on-chain or ii) partially/totally set by the input parameters of the call. + + Changes introduced: + - Adds `Call::restore_ledger` extrinsic to recover a corrupted ledger; + - Adds trait `frame_support::traits::currency::InspectLockableCurrency` to allow external pallets to read current locks given an account and lock ID; + - Implements `InspectLockableCurrency` in pallet-balances. + - Adds staking locks try-runtime checks (https://github.com/paritytech/polkadot-sdk/issues/3751) + +crates: + - name: pallet-staking + - name: pallet-balances diff --git a/prdoc/pr_3714.prdoc b/prdoc/pr_3714.prdoc new file mode 100644 index 000000000000..e276d0d2d373 --- /dev/null +++ b/prdoc/pr_3714.prdoc @@ -0,0 +1,10 @@ +title: Handle legacy lease swaps on coretime + +doc: + - audience: Runtime Dev + description: | + When a `registrar::swap` extrinsic is executed, it swaps two leases on the relay chain, but the + broker chain never learns about this swap. This change notifies the broker chain of the swap via + an XCM message so that it can update its state.
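To make the notification flow concrete, here is a minimal hedged sketch; the helper name, weight values, privileged-origin handling, and the shape of the broker-side call are assumptions for illustration, not code from this PR:

```rust
use xcm::prelude::*;

/// Hedged sketch: after a successful `registrar::swap`, tell the broker chain
/// about the swap so it can update its lease bookkeeping. `Router` stands for
/// the configured `SendXcm` implementation; `encoded_swap_call` is assumed to
/// be an already SCALE-encoded broker-side call (e.g. a `swap_leases`-style call).
fn notify_broker_of_swap<Router: SendXcm>(
	broker_para_id: u32,
	encoded_swap_call: Vec<u8>,
) -> Result<XcmHash, SendError> {
	let message = Xcm(vec![
		// The relay chain acts as a privileged origin for the broker chain here.
		Instruction::UnpaidExecution { weight_limit: WeightLimit::Unlimited, check_origin: None },
		Instruction::Transact {
			origin_kind: OriginKind::Superuser,
			require_weight_at_most: Weight::from_parts(1_000_000_000, 10_000),
			call: encoded_swap_call.into(),
		},
	]);
	let (hash, _price) =
		send_xcm::<Router>(Location::new(0, [Parachain(broker_para_id)]), message)?;
	Ok(hash)
}
```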
+crates: + - name: pallet-broker diff --git a/prdoc/pr_3718.prdoc b/prdoc/pr_3718.prdoc new file mode 100644 index 000000000000..b2b24cc9704d --- /dev/null +++ b/prdoc/pr_3718.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Deprecate scheduler traits v1 and v2 + +doc: + - audience: Runtime Dev + description: | + Adds the `#[deprecated]` attribute to scheduler traits v1 and v2 to deprecate the old versions. + +crates: + - name: frame-support + - name: pallet-scheduler diff --git a/prdoc/pr_3738.prdoc b/prdoc/pr_3738.prdoc new file mode 100644 index 000000000000..cbf19b95c36a --- /dev/null +++ b/prdoc/pr_3738.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from `pallet-alliance` + +doc: + - audience: Runtime Dev + description: | + This PR removes `pallet::getter` usage from `pallet-alliance`, and updates dependent code accordingly. + The syntax `StorageItem::<T, I>::get()` should be used instead. + +crates: + - name: pallet-alliance + bump: major diff --git a/prdoc/pr_3740.prdoc b/prdoc/pr_3740.prdoc new file mode 100644 index 000000000000..03df8ec5fea0 --- /dev/null +++ b/prdoc/pr_3740.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from Beefy and MMR pallets + +doc: + - audience: Runtime Dev + description: | + This PR removes `pallet::getter` usage from `pallet-beefy`, `pallet-beefy-mmr` and `pallet-mmr`, and updates dependent code and runtimes accordingly. + The syntax `StorageItem::<T, I>::get()` should be used instead. + +crates: + - name: pallet-beefy + - name: pallet-beefy-mmr + - name: pallet-mmr + - name: kitchensink-runtime + - name: rococo-runtime + - name: westend-runtime diff --git a/prdoc/pr_3749.prdoc b/prdoc/pr_3749.prdoc new file mode 100644 index 000000000000..1ebde9670e0f --- /dev/null +++ b/prdoc/pr_3749.prdoc @@ -0,0 +1,47 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "pallet-xcm: deprecate execute and send in favor of execute_blob and send_blob" + +doc: + - audience: Runtime Dev + description: | + pallet-xcm's extrinsics `execute` and `send` have been marked as deprecated. + Please change their usage to the new `execute_blob` and `send_blob`. + The migration from the old extrinsics to the new ones is very simple. + If you have your message `xcm: VersionedXcm<RuntimeCall>`, then instead of passing in + `Box::new(xcm)` to both `execute` and `send`, you would pass in + `xcm.encode().try_into()` and handle the potential error of its encoded length + being bigger than `MAX_XCM_ENCODED_SIZE`. + + pallet-contracts now takes the encoded XCM as well. It follows the same API as + `execute_blob` and `send_blob`. + - audience: Runtime User + description: | + pallet-xcm has a new pair of extrinsics, `execute_blob` and `send_blob`. + These are meant to be used instead of `execute` and `send`, which are now deprecated + and will be removed eventually. + These new extrinsics just require you to input the encoded XCM.
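As an illustration, a hedged sketch of the encoding step; the bound value and the `PolkadotXcm` alias are assumptions for the example, not the pallet's verbatim definitions:

```rust
use codec::Encode;
use frame_support::{traits::ConstU32, BoundedVec};

/// Hedged sketch: the real bound is pallet-xcm's `MAX_XCM_ENCODED_SIZE`;
/// `1024` is an assumed stand-in for illustration only.
type XcmBlob = BoundedVec<u8, ConstU32<1024>>;

/// Encode an XCM for `execute_blob`/`send_blob`, surfacing the oversize case
/// that callers of the new extrinsics must handle.
fn encode_xcm_blob(xcm: &impl Encode) -> Result<XcmBlob, ()> {
	xcm.encode().try_into().map_err(|_| ())
}

// Call-site change (pseudo-code; `PolkadotXcm` is the conventional runtime alias):
//     before: PolkadotXcm::execute(origin, Box::new(xcm), max_weight)
//     after:  PolkadotXcm::execute_blob(origin, encode_xcm_blob(&xcm)?, max_weight)
```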
+ There's a new utility in PolkadotJS Apps for encoding XCMs you can use: + https://polkadot.js.org/apps/#/utilities/xcm + Just pass in the encoded XCM to the new extrinsics and you're done. + + pallet-contracts now takes the encoded XCM as well. It follows the same API as + `execute_blob` and `send_blob`. + +crates: +- name: pallet-xcm +- name: staging-xcm +- name: staging-xcm-builder +- name: pallet-contracts +- name: asset-hub-rococo-runtime +- name: asset-hub-westend-runtime +- name: bridge-hub-rococo-runtime +- name: bridge-hub-westend-runtime +- name: collectives-westend-runtime +- name: coretime-rococo-runtime +- name: coretime-westend-runtime +- name: people-rococo-runtime +- name: people-westend-runtime +- name: rococo-runtime +- name: westend-runtime diff --git a/prdoc/pr_3754.prdoc b/prdoc/pr_3754.prdoc new file mode 100644 index 000000000000..94ea6d566088 --- /dev/null +++ b/prdoc/pr_3754.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Migrates Westend to Runtime V2 + +doc: + - audience: Runtime Dev + description: | + This PR migrates Westend from `construct_runtime` to Runtime V2 + as introduced in https://github.com/paritytech/polkadot-sdk/pull/1378 + +crates: + - name: westend-runtime diff --git a/prdoc/pr_3761.prdoc b/prdoc/pr_3761.prdoc new file mode 100644 index 000000000000..65b8c396fe3b --- /dev/null +++ b/prdoc/pr_3761.prdoc @@ -0,0 +1,25 @@ +title: "Snowbridge: Synchronize from Snowfork repository" + +doc: + - audience: Runtime Dev + description: | + This PR improves the beacon client to send the execution header along with the message as proof and removes the verification and storing of all execution headers. + If the AH sovereign account is depleted and relayer rewards cannot be paid, the message should still be processed. + +crates: +- name: snowbridge-pallet-ethereum-client + bump: minor +- name: snowbridge-pallet-inbound-queue + bump: minor +- name: snowbridge-beacon-primitives + bump: minor +- name: snowbridge-core + bump: minor +- name: snowbridge-runtime-test-common + bump: minor +- name: asset-hub-rococo-runtime + bump: minor +- name: bridge-hub-rococo-runtime + bump: minor +- name: bridge-hub-rococo-integration-tests + bump: minor diff --git a/prdoc/pr_3792.prdoc b/prdoc/pr_3792.prdoc new file mode 100644 index 000000000000..cbcdc29a9c6e --- /dev/null +++ b/prdoc/pr_3792.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[pallet-xcm] fix transport fees for remote reserve transfers" + +doc: + - audience: Runtime Dev + description: | + This PR fixes `pallet_xcm::transfer_assets` and + `pallet_xcm::limited_reserve_transfer_assets` extrinsics for transfers + that need to go through remote reserves. The fix is adding a + `SetFeesMode { jit_withdraw: true }` instruction before local execution of + `InitiateReserveWithdraw` so that delivery fees are correctly charged by + the xcm-executor. Without this change, a runtime that has implemented + delivery fees would not be able to execute remote reserve transfers using + these extrinsics.
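To visualize the fix, a simplified sketch of the locally executed part of such a transfer with the new instruction prepended; the function shape and placeholder arguments are assumptions, only the two instructions follow the description above:

```rust
use xcm::prelude::*;

/// Hedged sketch: the locally executed program now switches to JIT fee
/// withdrawal before `InitiateReserveWithdraw`, so the xcm-executor can charge
/// delivery fees when it sends the remote part of the transfer.
/// `assets`, `reserve` and `remote_xcm` are placeholders.
fn local_program(assets: AssetFilter, reserve: Location, remote_xcm: Xcm<()>) -> Xcm<()> {
	Xcm(vec![
		SetFeesMode { jit_withdraw: true },
		InitiateReserveWithdraw { assets, reserve, xcm: remote_xcm },
	])
}
```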
+ +crates: + - name: pallet-xcm diff --git a/prdoc/pr_3795.prdoc b/prdoc/pr_3795.prdoc new file mode 100644 index 000000000000..da01fcbec821 --- /dev/null +++ b/prdoc/pr_3795.prdoc @@ -0,0 +1,14 @@ +title: Enable collators to build on multiple cores + +doc: + - audience: Node Dev + description: | + Introduces a `CoreIndex` parameter in `SubmitCollationParams`. This enables + the collators to make use of potentially multiple cores assigned at some relay + chain block. This extra parameter is used by the collator protocol and collation + generation subsystems to forward the collation to the appropriate backing group. + +crates: +- name: polkadot-node-collation-generation +- name: polkadot-collator-protocol + bump: minor diff --git a/prdoc/pr_3808.prdoc b/prdoc/pr_3808.prdoc new file mode 100644 index 000000000000..9b50f721df33 --- /dev/null +++ b/prdoc/pr_3808.prdoc @@ -0,0 +1,20 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix spelling mistakes in source code + +doc: + - audience: Node Operator + description: | + Some spelling mistakes in log output, error messages and tracing (prometheus/grafana) have been fixed. + - audience: Runtime Dev + description: | + Public crate changes: + - The public trait `RuntimeParameterStore` in `substrate/frame/support` had the associated type renamed from `AggregratedKeyValue` to `AggregatedKeyValue`. + - The public trait `AggregratedKeyValue` in `substrate/frame/support` was similarly renamed to `AggregatedKeyValue`. + - The public methods `test_versioning` and `test_versioning_register_only` of the `TestApi` trait in `substrate/primitives/runtime-interface/test-wasm` had the spelling of `versionning` changed to `versioning`. + - The public functions `read_trie_first_descendant_value` and `read_child_trie_first_descendant_value` in `substrate/primitives/trie` had the spelling of `descedant` changed to `descendant`. +crates: + - name: frame-support + - name: sp-runtime-interface-test-wasm + - name: sp-trie diff --git a/prdoc/pr_3813.prdoc b/prdoc/pr_3813.prdoc new file mode 100644 index 000000000000..66dfd70e1b17 --- /dev/null +++ b/prdoc/pr_3813.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Renames `frame` crate to `polkadot-sdk-frame` + +doc: + - audience: Runtime Dev + description: | + This PR renames the `frame` crate to `polkadot-sdk-frame`, as `frame` is not available on crates.io. + Please note that this crate can only be imported as `polkadot-sdk-frame` or `frame`. + +crates: + - name: polkadot-sdk-frame + bump: major diff --git a/prdoc/pr_3817.prdoc b/prdoc/pr_3817.prdoc new file mode 100644 index 000000000000..bf9d397122f9 --- /dev/null +++ b/prdoc/pr_3817.prdoc @@ -0,0 +1,23 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Parachain Runtime API Implementations into mod apis Refactoring + +doc: + - audience: Runtime Dev + description: | + This PR introduces a refactoring to the runtime API implementations within the parachain template project. The primary changes include enhancing the visibility of `RUNTIME_API_VERSIONS` to `pub` in `impl_runtime_apis.rs`, centralizing API implementations in a new `apis.rs` file, and streamlining `lib.rs`.
These changes aim to improve project structure, maintainability, and readability. + + Key Changes: + - `RUNTIME_API_VERSIONS` is now publicly accessible, enhancing module-wide visibility. + - Introduction of `apis.rs` centralizes runtime API implementations, promoting a cleaner and more navigable project structure. + - The main runtime library file, `lib.rs`, has been updated to reflect these structural changes, removing redundant API implementations and simplifying runtime configuration by pointing `VERSION` to the newly exposed `RUNTIME_API_VERSIONS` from `apis.rs`. + + Motivations: + - **Improved Project Structure**: Centralizing API implementations offers a more organized and understandable project layout. + - **Enhanced Readability**: The refactoring efforts aim to declutter `lib.rs`, facilitating easier comprehension for new contributors. + +crates: + - name: sp-api-proc-macro + - name: parachain-template-node + - name: parachain-template-runtime diff --git a/prdoc/pr_3835.prdoc b/prdoc/pr_3835.prdoc new file mode 100644 index 000000000000..d2f49f8fc116 --- /dev/null +++ b/prdoc/pr_3835.prdoc @@ -0,0 +1,54 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "migrations: prevent accidentally using inner unversioned migration instead of `VersionedMigration`" + +doc: + - audience: Runtime Dev + description: | + Currently, it is possible to accidentally use the inner unversioned migration instead of `VersionedMigration` + since both implement `OnRuntimeUpgrade`. With this change, we make it clear that `Inner` is not intended + to be used directly. It is achieved by bounding `Inner` to the new trait `UncheckedOnRuntimeUpgrade`, which + has the same interface as `OnRuntimeUpgrade`, but cannot be used directly for runtime upgrade migrations. + + This change will break all existing migrations passed to `VersionedMigration`. Developers should simply change + those migrations to implement `UncheckedOnRuntimeUpgrade` instead of `OnRuntimeUpgrade`. + + Example: + + ``` + --- a/path/to/migration.rs + +++ b/path/to/migration.rs + @@ -1,7 +1,7 @@ + -impl OnRuntimeUpgrade for MigrateVNToVM { + +impl UncheckedOnRuntimeUpgrade for MigrateVNToVM { + fn on_runtime_upgrade() -> Weight { + // Migration logic here + // Adjust the migration logic if necessary to align with the expectations + // of the new `UncheckedOnRuntimeUpgrade` trait. + 0 + } + } + ``` + +crates: + - name: "pallet-example-single-block-migrations" + bump: "major" + - name: "pallet-xcm" + bump: "major" + - name: "pallet-grandpa" + bump: "major" + - name: "pallet-identity" + bump: "major" + - name: "pallet-nomination-pools" + bump: "major" + - name: "pallet-society" + bump: "major" + - name: "frame-support" + bump: "major" + - name: "pallet-uniques" + bump: "major" + - name: "polkadot-runtime-parachains" + bump: "major" + - name: "polkadot-runtime-common" + bump: "major" diff --git a/prdoc/pr_3844.prdoc b/prdoc/pr_3844.prdoc new file mode 100644 index 000000000000..a92092f91b20 --- /dev/null +++ b/prdoc/pr_3844.prdoc @@ -0,0 +1,25 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add the ability for MessageQueue to process enqueued messages on idle + +doc: + - audience: Runtime Dev + description: | + Add the option to use remaining weight on idle for processing enqueued messages.
+ This increases the chances that messages enqueued during inherent extrinsics are processed in the same block. + A new config type is added to the message-queue `Config` trait: + - `IdleMaxServiceWeight` + + Example: + ```rust + parameter_types! { + // The maximum weight to be used from remaining weight for processing enqueued messages on idle + pub IdleMaxServiceWeight: Option<Weight> = Some(MessageQueueServiceWeight::get()); + } + + type IdleMaxServiceWeight = IdleMaxServiceWeight; // or `()` to not use this feature + ``` + +crates: + - name: pallet-message-queue diff --git a/prdoc/pr_3849.prdoc b/prdoc/pr_3849.prdoc new file mode 100644 index 000000000000..a1372b60ffc6 --- /dev/null +++ b/prdoc/pr_3849.prdoc @@ -0,0 +1,13 @@ +title: Unrequest a pre-image when it failed to execute + +doc: + - audience: Runtime User + description: | + When a referendum finishes, the proposal is scheduled. When it is scheduled, + the pre-image is requested. The pre-image is unrequested after the proposal + has been executed. However, if the proposal failed to execute, it wasn't unrequested + and thus could not be removed from the on-chain state. This issue is now solved + by ensuring that the pre-image is unrequested when it fails to execute. + +crates: + - name: pallet-scheduler diff --git a/prdoc/pr_3850.prdoc b/prdoc/pr_3850.prdoc new file mode 100644 index 000000000000..8f7ce16076e8 --- /dev/null +++ b/prdoc/pr_3850.prdoc @@ -0,0 +1,15 @@ +title: Detect incorrect pre-image length when submitting a referendum + +doc: + - audience: Runtime User + description: | + When submitting a referendum, the `proposal` is passed as an argument. + The `proposal` is most of the time a reference to a `pre-image`, + which also contains the length of the `pre-image`. This pull request + adds some logic to check whether the `pre-image` already exists and, if + it exists, ensures that the length is passed correctly. This prevents + the referendum from becoming unexecutable because of a mismatch of this + length. + +crates: + - name: pallet-referenda diff --git a/prdoc/pr_3854.prdoc b/prdoc/pr_3854.prdoc new file mode 100644 index 000000000000..cfc8e246d7e1 --- /dev/null +++ b/prdoc/pr_3854.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Export unified `ParachainHostFunctions` from `cumulus-client-service` + +doc: + - audience: Node Dev + description: | + Exports `ParachainHostFunctions` to have a bundled version of `SubstrateHostFunctions` and + `cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions`. This increases discoverability and makes + it more obvious that they should be used together in parachain nodes. + +crates: + - name: cumulus-client-service + bump: minor diff --git a/prdoc/pr_3927.prdoc b/prdoc/pr_3927.prdoc new file mode 100644 index 000000000000..a568636d0bd0 --- /dev/null +++ b/prdoc/pr_3927.prdoc @@ -0,0 +1,13 @@ +title: "pallet-xcm: deprecate transfer extrinsics without weight limit" + +doc: + - audience: Runtime Dev + description: | + pallet-xcm's extrinsics `teleport_assets` and `reserve_transfer_assets` have been + marked as deprecated. Please change their usage to `limited_teleport_assets` + and `limited_reserve_transfer_assets`, respectively, or use the generic/flexible + `transfer_assets` extrinsic.
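A hedged sketch of the call-site migration; `PolkadotXcm` is the conventional runtime alias for pallet-xcm and an assumption here, with argument names following the usual pallet-xcm signatures:

```rust
use xcm::prelude::*;

// Before (deprecated): no way to bound the weight bought for remote execution.
//     PolkadotXcm::teleport_assets(origin, dest, beneficiary, assets, fee_asset_item)?;

// After: the `limited_*` variants take an explicit `WeightLimit`;
// `Unlimited` reproduces the old behaviour.
PolkadotXcm::limited_teleport_assets(
	origin,
	dest,           // Box<VersionedLocation> of the destination chain
	beneficiary,    // Box<VersionedLocation> of the receiving account
	assets,         // Box<VersionedAssets> to teleport
	fee_asset_item, // index into `assets` of the asset used to pay fees
	WeightLimit::Unlimited,
)?;
```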
+ +crates: +- name: pallet-xcm + bump: minor diff --git a/prdoc/pr_3950.prdoc b/prdoc/pr_3950.prdoc new file mode 100644 index 000000000000..a333521898bf --- /dev/null +++ b/prdoc/pr_3950.prdoc @@ -0,0 +1,12 @@ +title: Add `ClaimQueue` wrapper + +doc: + - audience: Node Dev + description: | + Introduces a new wrapper type: `ClaimQueueSnapshot`. It contains a snapshot of the `ClaimQueue` + at an arbitrary relay chain block. Two methods are exposed to allow access to the claims at + specific depths. + +crates: + - name: polkadot-node-subsystem-util + bump: minor diff --git a/prdoc/schema_user.json b/prdoc/schema_user.json index 1bd0b3b93ee4..593113cf3260 100644 --- a/prdoc/schema_user.json +++ b/prdoc/schema_user.json @@ -132,7 +132,8 @@ } }, "required": [ - "name" + "name", + "bump" ] }, "migration_db": { @@ -187,6 +188,11 @@ "const": "patch", "title": "Patch", "description": "A bump to the third leftmost non-zero digit of the version number." + }, + { + "const": "none", + "title": "None", + "description": "This change requires no SemVer bump (e.g. change was a test or benchmark)." } ] }, diff --git a/scripts/bridges_update_subtree.sh b/scripts/bridges_update_subtree.sh index 5c5c7a322a16..2cd6d968d2b2 100755 --- a/scripts/bridges_update_subtree.sh +++ b/scripts/bridges_update_subtree.sh @@ -1,6 +1,6 @@ #!/bin/bash -# A script to udpate bridges repo as subtree to Cumulus +# A script to update bridges repo as subtree to Cumulus # Usage: # ./scripts/bridges_update_subtree.sh fetch # ./scripts/bridges_update_subtree.sh patch diff --git a/scripts/release/build-changelogs.sh b/scripts/release/build-changelogs.sh index a9275f45a50c..cbfb7ad0e48f 100755 --- a/scripts/release/build-changelogs.sh +++ b/scripts/release/build-changelogs.sh @@ -48,6 +48,7 @@ for audience in "${audiences[@]}"; do echo "Processing audience: $audience ($audience_id)" export TARGET_AUDIENCE=$audience tera -t "${TEMPLATE_AUDIENCE}" --env --env-key env "${CONTEXT_JSON}" > "$OUTPUT/relnote_${audience_id}.md" + cat "$OUTPUT/relnote_${audience_id}.md" >> "$OUTPUT/relnote_combined.md" done # Show the files diff --git a/scripts/release/templates/audience.md.tera b/scripts/release/templates/audience.md.tera index dc507053dd5a..0b47850e3a37 100644 --- a/scripts/release/templates/audience.md.tera +++ b/scripts/release/templates/audience.md.tera @@ -1,11 +1,9 @@ -## Release {{ env.PRODUCT }} {{ env.VERSION }} - -Changelog for `{{ env.TARGET_AUDIENCE }}`.
+### Changelog for `{{ env.TARGET_AUDIENCE }}` {% for file in prdoc -%} -#### PR #{{file.doc_filename.number}}: {{ file.content.title }} {% for doc_item in file.content.doc %} {%- if doc_item.audience == env.TARGET_AUDIENCE %} +#### [#{{file.doc_filename.number}}]: {{ file.content.title }} {{ doc_item.description }} {% endif -%} diff --git a/scripts/snowbridge_update_subtree.sh b/scripts/snowbridge_update_subtree.sh index 2276bb35469f..c572eaa18faa 100755 --- a/scripts/snowbridge_update_subtree.sh +++ b/scripts/snowbridge_update_subtree.sh @@ -1,6 +1,6 @@ #!/bin/bash -# A script to udpate bridges repo as subtree to Cumulus +# A script to update bridges repo as subtree to Cumulus # Usage: # ./scripts/update_subtree_snowbridge.sh fetch # ./scripts/update_subtree_snowbridge.sh patch diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml index d5de1dbe6c44..49485fe2a1b9 100644 --- a/substrate/bin/node/bench/Cargo.toml +++ b/substrate/bin/node/bench/Cargo.toml @@ -44,4 +44,4 @@ lazy_static = "1.4.0" parity-db = "0.4.12" sc-transaction-pool = { path = "../../../client/transaction-pool" } sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 8bddbbe04828..6346063b9d27 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -45,7 +45,7 @@ clap = { version = "4.5.3", features = ["derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.6.1" } serde = { features = ["derive"], workspace = true, default-features = true } jsonrpsee = { version = "0.22", features = ["server"] } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } rand = "0.8" @@ -129,7 +129,7 @@ sc-block-builder = { path = "../../../client/block-builder" } sp-tracing = { path = "../../../primitives/tracing" } sp-blockchain = { path = "../../../primitives/blockchain" } sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } -futures = "0.3.21" +futures = "0.3.30" tempfile = "3.1.0" assert_cmd = "2.0.2" nix = { version = "0.26.1", features = ["signal"] } @@ -160,7 +160,7 @@ sp-externalities = { path = "../../../primitives/externalities" } sp-keyring = { path = "../../../primitives/keyring" } sp-runtime = { path = "../../../primitives/runtime" } serde_json = { workspace = true, default-features = true } -scale-info = { version = "2.10.0", features = ["derive", "serde"] } +scale-info = { version = "2.11.1", features = ["derive", "serde"] } sp-trie = { path = "../../../primitives/trie" } sp-state-machine = { path = "../../../primitives/state-machine" } diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index 23a62cc0bd24..d04780d5f953 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -145,7 +145,7 @@ fn prepare_benchmark(client: &FullClient) -> (usize, Vec) { let src = Sr25519Keyring::Alice.pair(); let dst: MultiAddress = Sr25519Keyring::Bob.to_account_id().into(); - // Add as many tranfer extrinsics as possible into a single block. + // Add as many transfer extrinsics as possible into a single block. for nonce in 0.. 
{ let extrinsic: OpaqueExtrinsic = create_extrinsic( client, @@ -179,7 +179,7 @@ fn block_production(c: &mut Criterion) { let node = new_node(tokio_handle.clone()); let client = &*node.client; - // Buliding the very first block is around ~30x slower than any subsequent one, + // Building the very first block is around ~30x slower than any subsequent one, // so let's make sure it's built and imported before we benchmark anything. let mut block_builder = BlockBuilderBuilder::new(client) .on_parent_block(client.chain_info().best_hash) diff --git a/substrate/bin/node/cli/src/cli.rs b/substrate/bin/node/cli/src/cli.rs index 3345afae4fd2..56fbed51f8a4 100644 --- a/substrate/bin/node/cli/src/cli.rs +++ b/substrate/bin/node/cli/src/cli.rs @@ -49,7 +49,7 @@ pub struct Cli { /// Possible subcommands of the main binary. #[derive(Debug, clap::Subcommand)] pub enum Subcommand { - /// The custom inspect subcommmand for decoding blocks and extrinsics. + /// The custom inspect subcommand for decoding blocks and extrinsics. #[command( name = "inspect", about = "Decode given block or extrinsic using current native runtime." ) diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 460e84f4bd27..10ab9c888869 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -53,7 +53,7 @@ pub type HostFunctions = ( frame_benchmarking::benchmarking::HostFunctions, ); -/// A specialized `WasmExecutor` intended to use accross substrate node. It provides all required +/// A specialized `WasmExecutor` intended to be used across the substrate node. It provides all required /// HostFunctions. pub type RuntimeExecutor = sc_executor::WasmExecutor; @@ -422,6 +422,7 @@ pub fn new_full_base( let shared_voter_state = rpc_setup; let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; + let auth_disc_public_addresses = config.network.public_addresses.clone(); let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); @@ -610,6 +611,7 @@ pub fn new_full_base( sc_authority_discovery::new_worker_and_service_with_config( sc_authority_discovery::WorkerConfig { publish_non_global_ips: auth_disc_publish_non_global_ips, + public_addresses: auth_disc_public_addresses, ..Default::default() }, client.clone(), @@ -650,6 +652,7 @@ pub fn new_full_base( prometheus_registry: prometheus_registry.clone(), links: beefy_links, on_demand_justifications_handler: beefy_on_demand_justifications_handler, + is_authority: role.is_authority(), }; let beefy_gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs index 8c7b3c873157..69c96bf63a6d 100644 --- a/substrate/bin/node/cli/tests/fees.rs +++ b/substrate/bin/node/cli/tests/fees.rs @@ -135,7 +135,7 @@ fn transaction_fee_is_correct() { // if weight of the cheapest weight would be 10^7, this would be 10^9, which is: // - 1 MILLICENTS in substrate node. // - 1 milli-dot based on current polkadot runtime.
- // (this baed on assigning 0.1 CENT to the cheapest tx with `weight = 100`) + // (this is based on assigning 0.1 CENT to the cheapest tx with `weight = 100`) let mut t = new_test_ext(compact_code_unwrap()); t.insert(>::hashed_key_for(alice()), new_account_info(100)); t.insert(>::hashed_key_for(bob()), new_account_info(10)); diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index e49a2d08a454..5ce384e69c7e 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -23,7 +23,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", "max-encoded-len", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } static_assertions = "1.1.0" log = { workspace = true } serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index d91a6aee15bf..177a49afde5e 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1306,6 +1306,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = ConstU32<{ 64 * 1024 }>; type MaxStale = ConstU32<128>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); } parameter_types! { @@ -2982,7 +2983,7 @@ impl_runtime_apis! { #[api_version(3)] impl sp_consensus_beefy::BeefyApi for Runtime { fn beefy_genesis() -> Option { - Beefy::genesis_block() + pallet_beefy::GenesisBlock::<Runtime>::get() } fn validator_set() -> Option> { @@ -3021,11 +3022,11 @@ impl_runtime_apis! { BlockNumber, > for Runtime { fn mmr_root() -> Result { - Ok(Mmr::mmr_root()) + Ok(pallet_mmr::RootHash::<Runtime>::get()) } fn mmr_leaf_count() -> Result { - Ok(Mmr::mmr_leaves()) + Ok(pallet_mmr::NumberOfLeaves::<Runtime>::get()) } fn generate_proof( diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index 31f8689d46ca..fa3f90193ba5 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } fs_extra = "1" -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } tempfile = "3.1.0" frame-system = { path = "../../../frame/system" } diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index df302a6453b9..e5c2563905e9 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -84,7 +84,7 @@ impl BenchPair { /// Drop system cache. /// -/// Will panic if cache drop is impossbile. +/// Will panic if cache drop is impossible. pub fn drop_system_cache() { #[cfg(target_os = "windows")] { @@ -173,7 +173,7 @@ impl Clone for BenchDb { // We clear system cache after db clone but before any warmups. // This populates system cache with some data unrelated to actual - // data we will be quering further under benchmark (like what + // data we will be querying further under benchmark (like what // would have happened in real system that queries random entries // from database). drop_system_cache(); @@ -443,7 +443,7 @@ impl BenchDb { BlockContentIterator::new(content, &self.keyring, client) } - /// Get cliet for this database operations. + /// Get a client for this database's operations.
pub fn client(&mut self) -> Client { let (client, _backend, _task_executor) = Self::bench_client(self.database_type, self.directory_guard.path(), &self.keyring); diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs index 8c78030c8854..dbd0437921ff 100644 --- a/substrate/bin/utils/chain-spec-builder/src/lib.rs +++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs @@ -185,7 +185,7 @@ pub struct ConvertToRawCmd { /// Verifies the provided input chain spec. /// /// Silently checks if given input chain spec can be converted to raw. It allows to check if all -/// RuntimeGenesisConfig fiels are properly initialized and if the json does not contain invalid +/// RuntimeGenesisConfig fields are properly initialized and if the json does not contain invalid /// fields. #[derive(Parser, Debug, Clone)] pub struct VerifyCmd { diff --git a/substrate/bin/utils/subkey/README.md b/substrate/bin/utils/subkey/README.md index a5f27cfd3707..fc1053e232d7 100644 --- a/substrate/bin/utils/subkey/README.md +++ b/substrate/bin/utils/subkey/README.md @@ -77,7 +77,7 @@ below can be derived from those secrets. The output above also show the **public key** and the **Account ID**. Those are the independent from the network where you will use the key. -The **SS58 address** (or **Public Address**) of a new account is a reprensentation of the public keys of an account for +The **SS58 address** (or **Public Address**) of a new account is a representation of the public keys of an account for a given network (for instance Kusama or Polkadot). You can read more about the [SS58 format in the Substrate Docs](https://docs.substrate.io/reference/address-formats/) @@ -143,7 +143,7 @@ Secret phrase `soup lyrics media market way crouch elevator put moon useful ques SS58 Address: 5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC ``` -Using the `inspect` command (see more details below), we see that knowning only the **secret seed** is no longer +Using the `inspect` command (see more details below), we see that knowing only the **secret seed** is no longer sufficient to recover the account: ```bash diff --git a/substrate/bin/utils/subkey/src/lib.rs b/substrate/bin/utils/subkey/src/lib.rs index f3023acde404..33f28ef46a5e 100644 --- a/substrate/bin/utils/subkey/src/lib.rs +++ b/substrate/bin/utils/subkey/src/lib.rs @@ -94,10 +94,10 @@ //! seed** (also called **Private Key**). Those 2 secrets are the pieces of information you MUST //! keep safe and secret. All the other information below can be derived from those secrets. //! -//! The output above also show the **public key** and the **Account ID**. Those are the independant +//! The output above also shows the **public key** and the **Account ID**. Those are independent //! from the network where you will use the key. //! -//! The **SS58 address** (or **Public Address**) of a new account is a reprensentation of the public +//! The **SS58 address** (or **Public Address**) of a new account is a representation of the public //! keys of an account for a given network (for instance Kusama or Polkadot). //! //! You can read more about the [SS58 format in the Substrate Docs](https://docs.substrate.io/reference/address-formats/) and see the list of reserved prefixes in the [SS58 Registry](https://github.com/paritytech/ss58-registry). @@ -110,7 +110,7 @@ //! //! ### Json output //! -//! `subkey` can calso generate the output as *json*. +//! `subkey` can also generate the output as *json*.
This is useful for automation. //! //! command: //! @@ -163,7 +163,7 @@ //! SS58 Address: 5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC //! ``` //! -//! Using the `inspect` command (see more details below), we see that knowning only the **secret +//! Using the `inspect` command (see more details below), we see that knowing only the **secret //! seed** is no longer sufficient to recover the account: //! //! ```bash @@ -184,7 +184,7 @@ //! //! ### Inspecting a key //! -//! If you have *some data* about a key, `subkey inpsect` will help you discover more information +//! If you have *some data* about a key, `subkey inspect` will help you discover more information //! about it. //! //! If you have **secrets** that you would like to verify for instance, you can use: diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index cd7b613e277f..fb650c5b532f 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } fnv = "1.0.6" -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } parking_lot = "0.12.1" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml index cdd4052f0b09..dbd9ba0131a6 100644 --- a/substrate/client/authority-discovery/Cargo.toml +++ b/substrate/client/authority-discovery/Cargo.toml @@ -21,7 +21,7 @@ prost-build = "0.11" [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" ip_network = "0.4.1" libp2p = { version = "0.51.4", features = ["ed25519", "kad"] } @@ -29,6 +29,7 @@ multihash = { version = "0.18.1", default-features = false, features = [ "sha2", "std", ] } +linked_hash_set = "0.1.4" log = { workspace = true, default-features = true } prost = "0.12" rand = "0.8.5" @@ -42,7 +43,7 @@ sp-blockchain = { path = "../../primitives/blockchain" } sp-core = { path = "../../primitives/core" } sp-keystore = { path = "../../primitives/keystore" } sp-runtime = { path = "../../primitives/runtime" } -async-trait = "0.1.74" +async-trait = "0.1.79" multihash-codetable = { version = "0.1.1", features = [ "digest", "serde", diff --git a/substrate/client/authority-discovery/src/interval.rs b/substrate/client/authority-discovery/src/interval.rs index 23c7ce266e3b..0eee0d159cd8 100644 --- a/substrate/client/authority-discovery/src/interval.rs +++ b/substrate/client/authority-discovery/src/interval.rs @@ -28,6 +28,7 @@ use std::{ /// /// Doubles interval duration on each tick until the configured maximum is reached. pub struct ExpIncInterval { + start: Duration, max: Duration, next: Duration, delay: Delay, @@ -37,14 +38,29 @@ impl ExpIncInterval { /// Create a new [`ExpIncInterval`]. pub fn new(start: Duration, max: Duration) -> Self { let delay = Delay::new(start); - Self { max, next: start * 2, delay } + Self { start, max, next: start * 2, delay } } - /// Fast forward the exponentially increasing interval to the configured maximum. + /// Fast forward the exponentially increasing interval to the configured maximum, if not already + /// set. 
pub fn set_to_max(&mut self) { + if self.next == self.max { + return; + } + self.next = self.max; self.delay = Delay::new(self.next); } + + /// Rewind the exponentially increasing interval to the configured start, if not already set. + pub fn set_to_start(&mut self) { + if self.next == self.start * 2 { + return; + } + + self.next = self.start * 2; + self.delay = Delay::new(self.start); + } } impl Stream for ExpIncInterval { diff --git a/substrate/client/authority-discovery/src/lib.rs b/substrate/client/authority-discovery/src/lib.rs index 6bb12804cada..281188de1432 100644 --- a/substrate/client/authority-discovery/src/lib.rs +++ b/substrate/client/authority-discovery/src/lib.rs @@ -80,6 +80,10 @@ pub struct WorkerConfig { /// Defaults to `true` to avoid the surprise factor. pub publish_non_global_ips: bool, + /// Public addresses set by the node operator to always publish first in the authority + /// discovery DHT record. + pub public_addresses: Vec, + /// Reject authority discovery records that are not signed by their network identity (PeerId) /// /// Defaults to `false` to provide compatibility with old versions @@ -104,6 +108,7 @@ impl Default for WorkerConfig { // `authority_discovery_dht_event_received`. max_query_interval: Duration::from_secs(10 * 60), publish_non_global_ips: true, + public_addresses: Vec::new(), strict_record_validation: false, } } diff --git a/substrate/client/authority-discovery/src/tests.rs b/substrate/client/authority-discovery/src/tests.rs index 4fbc196c5ecd..edd50d073c8d 100644 --- a/substrate/client/authority-discovery/src/tests.rs +++ b/substrate/client/authority-discovery/src/tests.rs @@ -100,7 +100,7 @@ fn cryptos_are_compatible() { let sp_core_signature = sp_core_secret.sign(message); // no error expected... assert!(sp_core::ed25519::Pair::verify( - &sp_core::ed25519::Signature::from_slice(&libp2p_signature).unwrap(), + &sp_core::ed25519::Signature::try_from(libp2p_signature.as_slice()).unwrap(), message, &sp_core_public )); diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index 9bccb96ff378..546f8cdbffdc 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -35,6 +35,7 @@ use addr_cache::AddrCache; use codec::{Decode, Encode}; use ip_network::IpNetwork; use libp2p::{core::multiaddr, identity::PublicKey, multihash::Multihash, Multiaddr, PeerId}; +use linked_hash_set::LinkedHashSet; use multihash_codetable::{Code, MultihashDigest}; use log::{debug, error, log_enabled}; @@ -120,14 +121,25 @@ pub struct Worker { /// Interval to be proactive, publishing own addresses. publish_interval: ExpIncInterval, + /// Pro-actively publish our own addresses at this interval, if the keys in the keystore /// have changed. publish_if_changed_interval: ExpIncInterval, + /// List of keys onto which addresses have been published at the latest publication. /// Used to check whether they have changed. latest_published_keys: HashSet, + /// List of the kademlia keys that have been published at the latest publication. + /// Used to associate DHT events with our published records. + latest_published_kad_keys: HashSet, + /// Same value as in the configuration. publish_non_global_ips: bool, + + /// Public addresses set by the node operator to always publish first in the authority + /// discovery DHT record. + public_addresses: LinkedHashSet, + /// Same value as in the configuration. 
strict_record_validation: bool, @@ -136,6 +148,7 @@ pub struct Worker { /// Queue of throttled lookups pending to be passed to the network. pending_lookups: Vec, + /// Set of in-flight lookups. in_flight_lookups: HashMap, @@ -224,6 +237,29 @@ where None => None, }; + let public_addresses = { + let local_peer_id: Multihash = network.local_peer_id().into(); + + config + .public_addresses + .into_iter() + .map(|mut address| { + if let Some(multiaddr::Protocol::P2p(peer_id)) = address.iter().last() { + if peer_id != local_peer_id { + error!( + target: LOG_TARGET, + "Discarding invalid local peer ID in public address {address}.", + ); + } + // Always discard `/p2p/...` protocol for proper address comparison (local + // peer id will be added before publishing). + address.pop(); + } + address + }) + .collect() + }; + Worker { from_service: from_service.fuse(), client, @@ -232,7 +268,9 @@ where publish_interval, publish_if_changed_interval, latest_published_keys: HashSet::new(), + latest_published_kad_keys: HashSet::new(), publish_non_global_ips: config.publish_non_global_ips, + public_addresses, strict_record_validation: config.strict_record_validation, query_interval, pending_lookups: Vec::new(), @@ -304,32 +342,58 @@ where } fn addresses_to_publish(&self) -> impl Iterator { - let peer_id: Multihash = self.network.local_peer_id().into(); + let local_peer_id = self.network.local_peer_id(); let publish_non_global_ips = self.publish_non_global_ips; - let addresses = self.network.external_addresses().into_iter().filter(move |a| { - if publish_non_global_ips { - return true - } + let addresses = self + .public_addresses + .clone() + .into_iter() + .chain(self.network.external_addresses().into_iter().filter_map(|mut address| { + // Make sure the reported external address does not contain `/p2p/...` protocol. + if let Some(multiaddr::Protocol::P2p(peer_id)) = address.iter().last() { + if peer_id != *local_peer_id.as_ref() { + error!( + target: LOG_TARGET, + "Network returned external address '{address}' with peer id \ + not matching the local peer id '{local_peer_id}'.", + ); + debug_assert!(false); + } + address.pop(); + } - a.iter().all(|p| match p { - // The `ip_network` library is used because its `is_global()` method is stable, - // while `is_global()` in the standard library currently isn't. - multiaddr::Protocol::Ip4(ip) if !IpNetwork::from(ip).is_global() => false, - multiaddr::Protocol::Ip6(ip) if !IpNetwork::from(ip).is_global() => false, - _ => true, + if self.public_addresses.contains(&address) { + // Already added above. + None + } else { + Some(address) + } + })) + .filter(move |address| { + if publish_non_global_ips { + return true + } + + address.iter().all(|protocol| match protocol { + // The `ip_network` library is used because its `is_global()` method is stable, + // while `is_global()` in the standard library currently isn't. + multiaddr::Protocol::Ip4(ip) if !IpNetwork::from(ip).is_global() => false, + multiaddr::Protocol::Ip6(ip) if !IpNetwork::from(ip).is_global() => false, + _ => true, + }) }) - }); + .collect::>(); - debug!(target: LOG_TARGET, "Authority DHT record peer_id='{:?}' addresses='{:?}'", peer_id, addresses.clone().collect::>()); + debug!( + target: LOG_TARGET, + "Authority DHT record peer_id='{local_peer_id}' addresses='{addresses:?}'", + ); - // The address must include the peer id if not already set. 
- addresses.map(move |a| { - if a.iter().any(|p| matches!(p, multiaddr::Protocol::P2p(_))) { - a - } else { - a.with(multiaddr::Protocol::P2p(peer_id)) - } - }) + // The address must include the local peer id. + let local_peer_id: Multihash = local_peer_id.into(); + addresses + .into_iter() + .map(move |a| a.with(multiaddr::Protocol::P2p(local_peer_id))) } /// Publish own public addresses. @@ -347,8 +411,17 @@ where self.client.as_ref(), ).await?.into_iter().collect::>(); - if only_if_changed && keys == self.latest_published_keys { - return Ok(()) + if only_if_changed { + // If the authority keys did not change and the `publish_if_changed_interval` was + // triggered then do nothing. + if keys == self.latest_published_keys { + return Ok(()) + } + + // We have detected a change in the authority keys, reset the timers to + // publish and gather data faster. + self.publish_interval.set_to_start(); + self.query_interval.set_to_start(); } let addresses = serialize_addresses(self.addresses_to_publish()); @@ -372,6 +445,8 @@ where keys_vec, )?; + self.latest_published_kad_keys = kv_pairs.iter().map(|(k, _)| k.clone()).collect(); + for (key, value) in kv_pairs.into_iter() { self.network.put_value(key, value); } @@ -473,6 +548,10 @@ where } }, DhtEvent::ValuePut(hash) => { + if !self.latest_published_kad_keys.contains(&hash) { + return; + } + // Fast forward the exponentially increasing interval to the configured maximum. In // case this was the first successful address publishing there is no need for a // timely retry. @@ -485,6 +564,11 @@ where debug!(target: LOG_TARGET, "Successfully put hash '{:?}' on Dht.", hash) }, DhtEvent::ValuePutFailed(hash) => { + if !self.latest_published_kad_keys.contains(&hash) { + // Not a value we have published or received multiple times. 
@@ -473,6 +548,10 @@ where } }, DhtEvent::ValuePut(hash) => { + if !self.latest_published_kad_keys.contains(&hash) { + return; + } + // Fast forward the exponentially increasing interval to the configured maximum. In // case this was the first successful address publishing there is no need for a // timely retry. @@ -485,6 +564,11 @@ where debug!(target: LOG_TARGET, "Successfully put hash '{:?}' on Dht.", hash) }, DhtEvent::ValuePutFailed(hash) => { + if !self.latest_published_kad_keys.contains(&hash) { + // Not a value we have published or received multiple times. + return; + } + if let Some(metrics) = &self.metrics { metrics.dht_event_received.with_label_values(&["value_put_failed"]).inc(); } diff --git a/substrate/client/authority-discovery/src/worker/tests.rs b/substrate/client/authority-discovery/src/worker/tests.rs index c29120881940..6c684d88e502 100644 --- a/substrate/client/authority-discovery/src/worker/tests.rs +++ b/substrate/client/authority-discovery/src/worker/tests.rs @@ -716,12 +716,16 @@ fn addresses_to_publish_adds_p2p() { #[test] fn addresses_to_publish_respects_existing_p2p_protocol() { let (_dht_event_tx, dht_event_rx) = channel(1000); + let identity = Keypair::generate_ed25519(); + let peer_id = identity.public().to_peer_id(); + let external_address = "/ip6/2001:db8::/tcp/30333" + .parse::<Multiaddr>() + .unwrap() + .with(multiaddr::Protocol::P2p(peer_id.into())); let network: Arc<TestNetwork> = Arc::new(TestNetwork { - external_addresses: vec![ - "/ip6/2001:db8::/tcp/30333/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC" - .parse() - .unwrap(), - ], + peer_id, + identity, + external_addresses: vec![external_address], ..Default::default() }); diff --git a/substrate/client/basic-authorship/Cargo.toml b/substrate/client/basic-authorship/Cargo.toml index 51a06464d0d6..4890b66c9b2f 100644 --- a/substrate/client/basic-authorship/Cargo.toml +++ b/substrate/client/basic-authorship/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index 932287ac8657..1519c76c42c0 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -180,7 +180,7 @@ impl ProposerFactory { /// The soft deadline indicates where we should stop attempting to add transactions /// to the block, which exhaust resources. After soft deadline is reached, /// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS` - /// transactions which exhaust resrouces, we will conclude that the block is full. + /// transactions which exhaust resources, we will conclude that the block is full. /// /// Setting the value too low will significantly limit the amount of transactions /// we try in case they exhaust resources. Setting the value too high can
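As a worked illustration of the soft-deadline rule described in the doc comment above (names and the constant value are assumptions for this sketch, not the crate's actual internals): before the soft deadline the proposer keeps trying transactions; after it, a fixed budget of resource-exhausting transactions applies.

// Hypothetical sketch; `MAX_SKIPPED_TRANSACTIONS = 8` is an assumed value.
const MAX_SKIPPED_TRANSACTIONS: usize = 8;

fn should_conclude_block(past_soft_deadline: bool, exhausting_txs_seen: usize) -> bool {
    // Before the soft deadline we keep trying; after it, stop once the budget is spent.
    past_soft_deadline && exhausting_txs_seen >= MAX_SKIPPED_TRANSACTIONS
}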
diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index f2138c07d71a..f569b5f14a66 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -34,7 +34,7 @@ sp-runtime = { path = "../../primitives/runtime" } sp-state-machine = { path = "../../primitives/state-machine" } log = { workspace = true } array-bytes = { version = "6.1" } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] substrate-test-runtime = { path = "../../test-utils/runtime" } diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index 78e81e10d2b6..4ec8527de264 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -52,7 +52,7 @@ enum GenesisSource { File(PathBuf), Binary(Cow<'static, [u8]>), /// factory function + code -//Factory and G type parameter shall be removed togheter with `ChainSpec::from_genesis` +//Factory and G type parameter shall be removed together with `ChainSpec::from_genesis` Factory(Arc<dyn Fn() -> G + Send + Sync>, Vec<u8>), Storage(Storage), /// build action + code @@ -264,7 +264,7 @@ struct RuntimeInnerWrapper { enum Genesis<G> { /// (Deprecated) Contains the JSON representation of G (the native type representing the /// runtime's `RuntimeGenesisConfig` struct) (will be removed with `ChainSpec::from_genesis`) - /// without the runtime code. It is required to deserialize the legacy chainspecs genereted + /// without the runtime code. It is required to deserialize the legacy chainspecs generated /// with `ChainsSpec::from_genesis` method. Runtime(G), /// (Deprecated) Contains the JSON representation of G (the native type representing the @@ -276,13 +276,13 @@ enum Genesis { Raw(RawGenesis), /// State root hash of the genesis storage. StateRootHash(StorageData), - /// Represents the runtime genesis config in JSON format toghether with runtime code. + /// Represents the runtime genesis config in JSON format together with runtime code. RuntimeGenesis(RuntimeGenesisInner), } /// A configuration of a client. Does not include runtime storage initialization. /// Note: `genesis` field is ignored due to way how the chain specification is serialized into -/// JSON file. Refer to [`ChainSpecJsonContainer`], which flattens [`ClientSpec`] and denies uknown +/// JSON file. Refer to [`ChainSpecJsonContainer`], which flattens [`ClientSpec`] and denies unknown /// fields. #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(rename_all = "camelCase")] @@ -508,7 +508,7 @@ impl ChainSpec { self.client_spec.fork_id.as_deref() } - /// Additional loosly-typed properties of the chain. + /// Additional loosely-typed properties of the chain. /// /// Returns an empty JSON object if 'properties' not defined in config pub fn properties(&self) -> Properties { diff --git a/substrate/client/chain-spec/src/extension.rs b/substrate/client/chain-spec/src/extension.rs index f2939741535f..d1f035393496 100644 --- a/substrate/client/chain-spec/src/extension.rs +++ b/substrate/client/chain-spec/src/extension.rs @@ -284,7 +284,7 @@ where } } -/// A subset of the `Extension` trait that only allows for quering extensions. +/// A subset of the `Extension` trait that only allows for querying extensions. pub trait GetExtension { /// Get an extension of specific type.
fn get_any(&self, t: TypeId) -> &dyn Any; diff --git a/substrate/client/chain-spec/src/genesis_config_builder.rs b/substrate/client/chain-spec/src/genesis_config_builder.rs index c8b54f66be6f..61f065e213cb 100644 --- a/substrate/client/chain-spec/src/genesis_config_builder.rs +++ b/substrate/client/chain-spec/src/genesis_config_builder.rs @@ -33,7 +33,7 @@ use std::borrow::Cow; /// A utility that facilitates calling the GenesisBuilder API from the runtime wasm code blob. /// /// `EHF` type allows to specify the extended host function required for building runtime's genesis -/// config. The type will be compbined with default `sp_io::SubstrateHostFunctions`. +/// config. The type will be combined with default `sp_io::SubstrateHostFunctions`. pub struct GenesisConfigBuilderRuntimeCaller<'a, EHF = ()> where EHF: HostFunctions, diff --git a/substrate/client/chain-spec/src/lib.rs b/substrate/client/chain-spec/src/lib.rs index eab5f789f29a..6da5fd26d526 100644 --- a/substrate/client/chain-spec/src/lib.rs +++ b/substrate/client/chain-spec/src/lib.rs @@ -140,7 +140,7 @@ //! A JSON object that provides an explicit and comprehensive representation of the -//! RuntimeGenesisConfig struct, which is generated by frame::runtime::prelude::construct_runtime macro (example of generated struct). Must contain all the keys of +//! RuntimeGenesisConfig struct, which is generated by polkadot_sdk_frame::runtime::prelude::construct_runtime macro (example of generated struct). Must contain all the keys of //! the genesis config, no defaults will be used. @@ -393,7 +393,7 @@ pub trait ChainSpec: BuildStorage + Send + Sync { fn protocol_id(&self) -> Option<&str>; /// Optional network fork identifier. `None` by default. fn fork_id(&self) -> Option<&str>; - /// Additional loosly-typed properties of the chain. + /// Additional loosely-typed properties of the chain. /// /// Returns an empty JSON object if 'properties' not defined in config fn properties(&self) -> Properties; diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index 47b29555e660..805d3ee117f4 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -20,7 +20,7 @@ array-bytes = "6.1" chrono = "0.4.31" clap = { version = "4.5.3", features = ["derive", "string", "wrap_help"] } fdlimit = "0.3.0" -futures = "0.3.21" +futures = "0.3.30" itertools = "0.10.3" libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } log = { workspace = true, default-features = true } diff --git a/substrate/client/cli/src/commands/vanity.rs b/substrate/client/cli/src/commands/vanity.rs index ce7516132989..330a59493efc 100644 --- a/substrate/client/cli/src/commands/vanity.rs +++ b/substrate/client/cli/src/commands/vanity.rs @@ -51,7 +51,7 @@ pub struct VanityCmd { impl VanityCmd { /// Run the command pub fn run(&self) -> error::Result<()> { - let formated_seed = with_crypto_scheme!( + let formatted_seed = with_crypto_scheme!( self.crypto_scheme.scheme, generate_key( &self.pattern, @@ -62,7 +62,7 @@ impl VanityCmd { with_crypto_scheme!( self.crypto_scheme.scheme, print_from_uri( - &formated_seed, + &formatted_seed, None, self.network_scheme.network, self.output_scheme.output_type, diff --git a/substrate/client/cli/src/params/network_params.rs b/substrate/client/cli/src/params/network_params.rs index 12f19df2a685..94efb4280912 100644 --- a/substrate/client/cli/src/params/network_params.rs +++ b/substrate/client/cli/src/params/network_params.rs @@ -310,7 +310,7 @@ mod tests { } #[test] - fn sync_ingores_case() { + fn sync_ignores_case() { let params = Cli::try_parse_from(["", "--sync", "wArP"]).expect("Parses network params");
assert_eq!(SyncMode::Warp, params.network_params.sync); diff --git a/substrate/client/consensus/aura/Cargo.toml b/substrate/client/consensus/aura/Cargo.toml index 213f75974da0..64e2d16cd913 100644 --- a/substrate/client/consensus/aura/Cargo.toml +++ b/substrate/client/consensus/aura/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } thiserror = { workspace = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs index 1be7be8eeeaa..e220aaac508d 100644 --- a/substrate/client/consensus/aura/src/lib.rs +++ b/substrate/client/consensus/aura/src/lib.rs @@ -82,7 +82,7 @@ pub enum CompatibilityMode { None, /// Call `initialize_block` before doing any runtime calls. /// - /// Previously the node would execute `initialize_block` before fetchting the authorities + /// Previously the node would execute `initialize_block` before fetching the authorities /// from the runtime. This behaviour changed in: /// /// By calling `initialize_block` before fetching the authorities, on a block that diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml index c98fb7112b7c..b001e3d117aa 100644 --- a/substrate/client/consensus/babe/Cargo.toml +++ b/substrate/client/consensus/babe/Cargo.toml @@ -17,9 +17,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } num-bigint = "0.4.3" num-rational = "0.4.1" @@ -54,4 +54,4 @@ sc-network-test = { path = "../../network/test" } sp-timestamp = { path = "../../../primitives/timestamp" } sp-tracing = { path = "../../../primitives/tracing" } substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } -tokio = "1.22.0" +tokio = "1.37" diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml index 043b566673e7..b2661bbde27e 100644 --- a/substrate/client/consensus/babe/rpc/Cargo.toml +++ b/substrate/client/consensus/babe/rpc/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } -futures = "0.3.21" +futures = "0.3.30" serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } sc-consensus-babe = { path = ".." 
} @@ -34,7 +34,7 @@ sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] serde_json = { workspace = true, default-features = true } -tokio = "1.22.0" +tokio = "1.37" sc-consensus = { path = "../../common" } sc-keystore = { path = "../../../keystore" } sc-transaction-pool-api = { path = "../../../transaction-pool/api" } diff --git a/substrate/client/consensus/babe/src/authorship.rs b/substrate/client/consensus/babe/src/authorship.rs index 11f5233abc6b..57ee706a04f6 100644 --- a/substrate/client/consensus/babe/src/authorship.rs +++ b/substrate/client/consensus/babe/src/authorship.rs @@ -59,7 +59,7 @@ pub(super) fn calculate_primary_threshold( assert!(theta > 0.0, "authority with weight 0."); // NOTE: in the equation `p = 1 - (1 - c)^theta` the value of `p` is always - // capped by `c`. For all pratical purposes `c` should always be set to a + // capped by `c`. For all practical purposes `c` should always be set to a // value < 0.5, as such in the computations below we should never be near // edge cases like `0.999999`. diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index ccf72939631a..d10bdd8c7e4c 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -1418,7 +1418,7 @@ where // Skip babe logic if block already in chain or importing blocks during initial sync, // otherwise the check for epoch changes will error because trying to re-import an - // epoch change or because of missing epoch data in the tree, respectivelly. + // epoch change or because of missing epoch data in the tree, respectively. if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) || block_status == BlockStatus::InChain { diff --git a/substrate/client/consensus/babe/src/migration.rs b/substrate/client/consensus/babe/src/migration.rs index bec2d0a61f4b..5f1ece3ec0e2 100644 --- a/substrate/client/consensus/babe/src/migration.rs +++ b/substrate/client/consensus/babe/src/migration.rs @@ -64,7 +64,7 @@ impl EpochT for EpochV0 { // Implement From for Epoch impl EpochV0 { - /// Migrate the sturct to current epoch version. + /// Migrate the struct to current epoch version. 
pub fn migrate(self, config: &BabeConfiguration) -> Epoch { sp_consensus_babe::Epoch { epoch_index: self.epoch_index, diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index 8552a4900223..c1d57baa394a 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -14,10 +14,10 @@ workspace = true [dependencies] array-bytes = "6.1" async-channel = "1.8.0" -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } fnv = "1.0.6" -futures = "0.3" +futures = "0.3.30" log = { workspace = true, default-features = true } parking_lot = "0.12.1" thiserror = { workspace = true } @@ -40,7 +40,7 @@ sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } sp-keystore = { path = "../../../primitives/keystore" } sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" } sp-runtime = { path = "../../../primitives/runtime" } -tokio = "1.22.0" +tokio = "1.37" [dev-dependencies] diff --git a/substrate/client/consensus/beefy/README.md b/substrate/client/consensus/beefy/README.md index 13f88303a972..a7956cfcd42e 100644 --- a/substrate/client/consensus/beefy/README.md +++ b/substrate/client/consensus/beefy/README.md @@ -297,7 +297,7 @@ periodically on the global topic. Let's now dive into description of the message - Justification is considered worthwhile to gossip when: - It is for a recent (implementation specific) round or the latest mandatory round. - All signatures are valid and there is at least `2/3rd + 1` of them. - - Signatorees are part of the current validator set. + - Signatories are part of the current validator set. - Mandatory justifications should be announced periodically. ## Misbehavior diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index bb2ae4a08966..e46fc4f4410a 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } log = { workspace = true, default-features = true } parking_lot = "0.12.1" diff --git a/substrate/client/consensus/beefy/src/communication/mod.rs b/substrate/client/consensus/beefy/src/communication/mod.rs index 6fda63688e69..09c540e3b8a8 100644 --- a/substrate/client/consensus/beefy/src/communication/mod.rs +++ b/substrate/client/consensus/beefy/src/communication/mod.rs @@ -99,7 +99,7 @@ mod cost { // On-demand request was refused by peer. pub(super) const REFUSAL_RESPONSE: Rep = Rep::new(-100, "BEEFY: Proof request refused"); // On-demand request for a proof that can't be found in the backend. - pub(super) const UNKOWN_PROOF_REQUEST: Rep = Rep::new(-150, "BEEFY: Unknown proof request"); + pub(super) const UNKNOWN_PROOF_REQUEST: Rep = Rep::new(-150, "BEEFY: Unknown proof request"); } // benefit scalars for reporting peers. 
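For concreteness, the `2/3rd + 1` signature rule from the BEEFY README above amounts to the following check (an illustrative sketch only, with plain integers standing in for the signature and validator-set types):

// Worthwhile to gossip iff at least two thirds plus one of the validators signed.
fn worthwhile_to_gossip(valid_signatures: usize, validator_set_len: usize) -> bool {
    // With 100 validators this requires at least 2 * 100 / 3 + 1 = 67 signatures.
    valid_signatures >= 2 * validator_set_len / 3 + 1
}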
diff --git a/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs b/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs index d856e9748a10..ce184769fa77 100644 --- a/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs +++ b/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -170,7 +170,7 @@ where .flatten() .and_then(|hash| self.client.justifications(hash).ok().flatten()) .and_then(|justifs| justifs.get(BEEFY_ENGINE_ID).cloned()) - .ok_or_else(|| reputation_changes.push(cost::UNKOWN_PROOF_REQUEST)); + .ok_or_else(|| reputation_changes.push(cost::UNKNOWN_PROOF_REQUEST)); request .pending_response .send(netconfig::OutgoingResponse { diff --git a/substrate/client/consensus/beefy/src/keystore.rs b/substrate/client/consensus/beefy/src/keystore.rs index 2ddc938fbc6c..9582c2661c30 100644 --- a/substrate/client/consensus/beefy/src/keystore.rs +++ b/substrate/client/consensus/beefy/src/keystore.rs @@ -404,7 +404,7 @@ pub mod tests { let store: BeefyKeystore = Some(store).into(); - let msg = b"are you involved or commited?"; + let msg = b"are you involved or committed?"; let sig1 = store.sign(&alice, msg).unwrap(); let sig2 = Keyring::::Alice.sign(msg); @@ -440,7 +440,7 @@ pub mod tests { let alice = Keyring::Alice.public(); - let msg = b"are you involved or commited?"; + let msg = b"are you involved or committed?"; let sig = store.sign(&alice, msg).err().unwrap(); let err = Error::Signature(expected_error_message.to_string()); @@ -463,7 +463,7 @@ pub mod tests { let store: BeefyKeystore = None.into(); let alice = Keyring::Alice.public(); - let msg = b"are you involved or commited"; + let msg = b"are you involved or committed"; let sig = store.sign(&alice, msg).err().unwrap(); let err = Error::Keystore("no Keystore".to_string()); @@ -487,7 +487,7 @@ pub mod tests { let alice = Keyring::Alice.public(); // `msg` and `sig` match - let msg = b"are you involved or commited?"; + let msg = b"are you involved or committed?"; let sig = store.sign(&alice, msg).unwrap(); assert!(BeefyKeystore::verify(&alice, &sig, msg)); diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs index 323af1bc8305..714a0fb7c885 100644 --- a/substrate/client/consensus/beefy/src/lib.rs +++ b/substrate/client/consensus/beefy/src/lib.rs @@ -222,6 +222,8 @@ pub struct BeefyParams { pub links: BeefyVoterLinks, /// Handler for incoming BEEFY justifications requests from a remote peer. pub on_demand_justifications_handler: BeefyJustifsRequestHandler, + /// Whether running under "Authority" role. + pub is_authority: bool, } /// Helper object holding BEEFY worker communication/gossip components. /// @@ -270,6 +272,7 @@ where min_block_delta: u32, gossip_validator: Arc>, finality_notifications: &mut Fuse>, + is_authority: bool, ) -> Result { // Wait for BEEFY pallet to be active before starting voter. let (beefy_genesis, best_grandpa) = @@ -283,6 +286,7 @@ where runtime.clone(), &key_store, &metrics, + is_authority, ) .await?; // Update the gossip validator with the right starting round and set id. 
@@ -301,6 +305,7 @@ where comms: BeefyComms, links: BeefyVoterLinks, pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, + is_authority: bool, ) -> BeefyWorker { BeefyWorker { backend: self.backend, @@ -313,6 +318,7 @@ where comms, links, pending_justifications, + is_authority, } } @@ -423,6 +429,7 @@ where runtime: Arc, key_store: &BeefyKeystore, metrics: &Option, + is_authority: bool, ) -> Result, Error> { // Initialize voter state from AUX DB if compatible. if let Some(mut state) = crate::aux_schema::load_persistent(backend.as_ref())? @@ -455,7 +462,13 @@ where "🥩 Handling missed BEEFY session after node restart: {:?}.", new_session_start ); - state.init_session_at(new_session_start, validator_set, key_store, metrics); + state.init_session_at( + new_session_start, + validator_set, + key_store, + metrics, + is_authority, + ); } return Ok(state) } @@ -491,6 +504,7 @@ pub async fn start_beefy_gadget( prometheus_registry, links, mut on_demand_justifications_handler, + is_authority, } = beefy_params; let BeefyNetworkParams { @@ -553,6 +567,7 @@ pub async fn start_beefy_gadget( min_block_delta, beefy_comms.gossip_validator.clone(), &mut finality_notifications, + is_authority, ).fuse() => { match builder_init_result { Ok(builder) => break builder, @@ -580,6 +595,7 @@ pub async fn start_beefy_gadget( beefy_comms, links.clone(), BTreeMap::new(), + is_authority, ); match futures::future::select( diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index d106c9dcd881..aecfec7b9ed1 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -379,6 +379,7 @@ async fn voter_init_setup( Arc::new(api.clone()), &key_store, &metrics, + true, ) .await } @@ -438,6 +439,7 @@ where min_block_delta, prometheus_registry: None, on_demand_justifications_handler: on_demand_justif_handler, + is_authority: true, }; let task = crate::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index c8eb19621ba5..ac6b72d1ea40 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -33,7 +33,7 @@ use crate::{ }; use codec::{Codec, Decode, DecodeAll, Encode}; use futures::{stream::Fuse, FutureExt, StreamExt}; -use log::{debug, error, info, log_enabled, trace, warn}; +use log::{debug, error, info, trace, warn}; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; use sc_utils::notification::NotificationReceiver; use sp_api::ProvideRuntimeApi; @@ -51,7 +51,7 @@ use sp_runtime::{ SaturatedConversion, }; use std::{ - collections::{BTreeMap, BTreeSet, VecDeque}, + collections::{BTreeMap, VecDeque}, fmt::Debug, sync::Arc, }; @@ -76,13 +76,13 @@ pub(crate) enum RoundAction { pub(crate) struct VoterOracle { /// Queue of known sessions. Keeps track of voting rounds (block numbers) within each session. /// - /// There are three voter states coresponding to three queue states: + /// There are three voter states corresponding to three queue states: /// 1. voter uninitialized: queue empty, /// 2. up-to-date - all mandatory blocks leading up to current GRANDPA finalized: queue has ONE /// element, the 'current session' where `mandatory_done == true`, /// 3. lagging behind GRANDPA: queue has [1, N] elements, where all `mandatory_done == false`. 
- /// In this state, everytime a session gets its mandatory block BEEFY finalized, it's popped - /// off the queue, eventually getting to state `2. up-to-date`. + /// In this state, every time a session gets its mandatory block BEEFY finalized, it's + /// popped off the queue, eventually getting to state `2. up-to-date`. sessions: VecDeque>, /// Min delta in block numbers between two blocks, BEEFY should vote on. min_block_delta: u32, @@ -332,6 +332,7 @@ impl PersistedState { validator_set: ValidatorSet, key_store: &BeefyKeystore, metrics: &Option, + is_authority: bool, ) { debug!(target: LOG_TARGET, "🥩 New active validator set: {:?}", validator_set); @@ -348,11 +349,16 @@ impl PersistedState { } } - if log_enabled!(target: LOG_TARGET, log::Level::Debug) { - // verify the new validator set - only do it if we're also logging the warning - if verify_validator_set::(&new_session_start, &validator_set, key_store).is_err() { - metric_inc!(metrics, beefy_no_authority_found_in_store); - } + // verify we have some BEEFY key available in keystore when role is authority. + if is_authority && key_store.public_keys().map_or(false, |k| k.is_empty()) { + error!( + target: LOG_TARGET, + "🥩 for session starting at block {:?} no BEEFY authority key found in store, \ + you must generate valid session keys \ + (https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#generating-the-session-keys)", + new_session_start, + ); + metric_inc!(metrics, beefy_no_authority_found_in_store); } let id = validator_set.id(); @@ -390,6 +396,8 @@ pub(crate) struct BeefyWorker { pub persisted_state: PersistedState, /// BEEFY voter metrics pub metrics: Option, + /// Node runs under "Authority" role. + pub is_authority: bool, } impl BeefyWorker @@ -425,6 +433,7 @@ where validator_set, &self.key_store, &self.metrics, + self.is_authority, ); } @@ -1040,33 +1049,6 @@ where } } -/// Verify `active` validator set for `block` against the key store -/// -/// We want to make sure that we have _at least one_ key in our keystore that -/// is part of the validator set, that's because if there are no local keys -/// then we can't perform our job as a validator. -/// -/// Note that for a non-authority node there will be no keystore, and we will -/// return an error and don't check. The error can usually be ignored. 
-fn verify_validator_set<B: Block>( - block: &NumberFor<B>, - active: &ValidatorSet<AuthorityId>, - key_store: &BeefyKeystore, -) -> Result<(), Error> { - let active: BTreeSet<&AuthorityId> = active.validators().iter().collect(); - - let public_keys = key_store.public_keys()?; - let store: BTreeSet<&AuthorityId> = public_keys.iter().collect(); - - if store.intersection(&active).count() == 0 { - let msg = "no authority public key found in store".to_string(); - debug!(target: LOG_TARGET, "🥩 for block {:?} {}", block, msg); - Err(Error::Keystore(msg)) - } else { - Ok(()) - } -} - #[cfg(test)] pub(crate) mod tests { use super::*; @@ -1208,6 +1190,7 @@ pub(crate) mod tests { comms, pending_justifications: BTreeMap::new(), persisted_state, + is_authority: true, } } @@ -1471,32 +1454,6 @@ pub(crate) mod tests { assert_eq!(extracted, Some(validator_set)); } - #[tokio::test] - async fn keystore_vs_validator_set() { - let keys = &[Keyring::Alice]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1); - let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone()); - - // keystore doesn't contain other keys than validators' - assert_eq!(verify_validator_set::<Block>(&1, &validator_set, &worker.key_store), Ok(())); - - // unknown `Bob` key - let keys = &[Keyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let err_msg = "no authority public key found in store".to_string(); - let expected = Err(Error::Keystore(err_msg)); - assert_eq!(verify_validator_set::<Block>(&1, &validator_set, &worker.key_store), expected); - - // worker has no keystore - worker.key_store = None.into(); - let expected_err = Err(Error::Keystore("no Keystore".into())); - assert_eq!( - verify_validator_set::<Block>(&1, &validator_set, &worker.key_store), - expected_err - ); - } - #[tokio::test] async fn should_finalize_correctly() { let keys = [Keyring::Alice];
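The removed check reduces to a set intersection between the active validator set and the locally stored keys; a minimal standalone illustration, with `String` standing in for `AuthorityId`:

use std::collections::BTreeSet;

// Succeeds iff at least one local key is part of the active validator set.
fn have_key_in_active_set(active: &[String], in_store: &[String]) -> bool {
    let active: BTreeSet<_> = active.iter().collect();
    let store: BTreeSet<_> = in_store.iter().collect();
    store.intersection(&active).count() > 0
}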
diff --git a/substrate/client/consensus/common/Cargo.toml b/substrate/client/consensus/common/Cargo.toml index 7f36dfc09ef8..b2738a1d12d9 100644 --- a/substrate/client/consensus/common/Cargo.toml +++ b/substrate/client/consensus/common/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository.workspace = true -description = "Collection of common consensus specific imlementations for Substrate (client)" +description = "Collection of common consensus specific implementations for Substrate (client)" readme = "README.md" [lints] @@ -16,8 +16,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" -futures = { version = "0.3.21", features = ["thread-pool"] } +async-trait = "0.1.79" +futures = { version = "0.3.30", features = ["thread-pool"] } futures-timer = "3.0.1" libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } log = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/common/src/import_queue.rs b/substrate/client/consensus/common/src/import_queue.rs index 39d5bf8ed35d..062e244a912e 100644 --- a/substrate/client/consensus/common/src/import_queue.rs +++ b/substrate/client/consensus/common/src/import_queue.rs @@ -133,7 +133,7 @@ pub trait ImportQueue: Send { /// Start asynchronous runner for import queue. /// /// Takes an object implementing [`Link`] which allows the import queue to - /// influece the synchronization process. + /// influence the synchronization process. async fn run(self, link: Box<dyn Link<B>>); } diff --git a/substrate/client/consensus/common/src/import_queue/basic_queue.rs b/substrate/client/consensus/common/src/import_queue/basic_queue.rs index 1cc7ec26fd19..125d4f104c19 100644 --- a/substrate/client/consensus/common/src/import_queue/basic_queue.rs +++ b/substrate/client/consensus/common/src/import_queue/basic_queue.rs @@ -187,7 +187,7 @@ impl ImportQueue for BasicQueue { /// Start asynchronous runner for import queue. /// /// Takes an object implementing [`Link`] which allows the import queue to - /// influece the synchronization process. + /// influence the synchronization process. async fn run(mut self, mut link: Box<dyn Link<B>>) { loop { if let Err(_) = self.result_port.next_action(&mut *link).await { @@ -198,7 +198,7 @@ impl ImportQueue for BasicQueue { } } -/// Messages destinated to the background worker. +/// Messages destined for the background worker. mod worker_messages { use super::*; diff --git a/substrate/client/consensus/epochs/src/lib.rs b/substrate/client/consensus/epochs/src/lib.rs index 29bb18e147c2..bbc143b7bd30 100644 --- a/substrate/client/consensus/epochs/src/lib.rs +++ b/substrate/client/consensus/epochs/src/lib.rs @@ -326,7 +326,7 @@ impl AsRef for IncrementedEpoch { /// /// The first epoch, epoch_0, is special cased by saying that it starts at /// slot number of the first block in the chain. When bootstrapping a chain, -/// there can be multiple competing block #1s, so we have to ensure that the overlayed +/// there can be multiple competing block #1s, so we have to ensure that the overlaid /// DAG doesn't get confused. /// /// The first block of every epoch should be producing a descriptor for the next @@ -655,7 +655,7 @@ where /// Revert to a specified block given its `hash` and `number`. /// This removes all the epoch changes information that were announced by - /// all the given block descendents. + /// all the given block descendants.
pub fn revert<D: IsDescendentOfBuilder<Hash>>( &mut self, descendent_of_builder: D, diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index 1ab953525220..797b4ea35b29 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -19,10 +19,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ahash = "0.8.2" array-bytes = "6.1" -async-trait = "0.1.74" +async-trait = "0.1.79" dyn-clone = "1.0" finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } parity-scale-codec = { version = "3.6.1", features = ["derive"] } @@ -58,7 +58,7 @@ sp-runtime = { path = "../../../primitives/runtime" } assert_matches = "1.3.0" finality-grandpa = { version = "0.16.2", features = ["derive-codec", "test-helpers"] } serde = { workspace = true, default-features = true } -tokio = "1.22.0" +tokio = "1.37" sc-network = { path = "../../network" } sc-network-test = { path = "../../network/test" } sp-keyring = { path = "../../../primitives/keyring" } diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml index f7e87415448e..0789a429ac41 100644 --- a/substrate/client/consensus/grandpa/rpc/Cargo.toml +++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } -futures = "0.3.16" +futures = "0.3.30" jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } log = { workspace = true, default-features = true } parity-scale-codec = { version = "3.6.1", features = ["derive"] } diff --git a/substrate/client/consensus/grandpa/src/communication/mod.rs b/substrate/client/consensus/grandpa/src/communication/mod.rs index 5c7e1276297a..6e87d6bf9a28 100644 --- a/substrate/client/consensus/grandpa/src/communication/mod.rs +++ b/substrate/client/consensus/grandpa/src/communication/mod.rs @@ -222,13 +222,13 @@ pub(crate) struct NetworkBridge, S: Syncing> { neighbor_sender: periodic::NeighborPacketSender<B>, /// `NeighborPacketWorker` processing packets sent through the `NeighborPacketSender`. - // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its + // `NetworkBridge` is required to be clonable, thus one needs to be able to clone its // children, thus one has to wrap `neighbor_packet_worker` with an `Arc` `Mutex`. neighbor_packet_worker: Arc<Mutex<periodic::NeighborPacketWorker<B>>>, /// Receiver side of the peer report stream populated by the gossip validator, forwarded to the /// gossip engine. - // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its + // `NetworkBridge` is required to be clonable, thus one needs to be able to clone its // children, thus one has to wrap gossip_validator_report_stream with an `Arc` `Mutex`. Given // that it is just an `UnboundedReceiver`, one could also switch to a // multi-producer-*multi*-consumer channel implementation.
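The comments above describe a standard trick: to keep the outer type `Clone`, each non-clonable child is shared behind `Arc<Mutex<_>>`. A minimal sketch of the pattern, with illustrative names:

use std::sync::{Arc, Mutex};

struct NeighborWorker; // stands in for a worker/stream that is not `Clone`

#[derive(Clone)]
struct Bridge {
    // Cloning `Bridge` clones the `Arc`, so every clone shares the one worker.
    worker: Arc<Mutex<NeighborWorker>>,
}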
@@ -766,7 +766,7 @@ impl Sink> for OutgoingMessages { ) .ok_or_else(|| { Error::Signing(format!( - "Failed to sign GRANDPA vote for round {} targetting {:?}", + "Failed to sign GRANDPA vote for round {} targeting {:?}", self.round, target_hash )) })?; diff --git a/substrate/client/consensus/grandpa/src/communication/periodic.rs b/substrate/client/consensus/grandpa/src/communication/periodic.rs index daa752920287..9d0e76b7c80b 100644 --- a/substrate/client/consensus/grandpa/src/communication/periodic.rs +++ b/substrate/client/consensus/grandpa/src/communication/periodic.rs @@ -106,7 +106,7 @@ impl Stream for NeighborPacketWorker { // Make sure the underlying task is scheduled for wake-up. // - // Note: In case poll_unpin is called after the resetted delay fires again, this + // Note: In case poll_unpin is called after the reset delay fires again, this // will drop one tick. Deemed as very unlikely and also not critical. while this.delay.poll_unpin(cx).is_ready() {} diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs index 7e42c2d45c73..14708cc89e89 100644 --- a/substrate/client/consensus/grandpa/src/tests.rs +++ b/substrate/client/consensus/grandpa/src/tests.rs @@ -1805,7 +1805,7 @@ async fn grandpa_environment_checks_if_best_block_is_descendent_of_finality_targ ); // best block is higher than finality target and it's on the same fork, - // the best block passed to the voting rule should not be overriden + // the best block passed to the voting rule should not be overridden select_chain.set_best_chain(client.expect_header(hashof10_a).unwrap()); select_chain.set_finality_target(client.expect_header(hashof5_a).unwrap().hash()); voting_rule.set_expected_best_block(hashof10_a); @@ -1940,7 +1940,7 @@ async fn justification_with_equivocation() { precommits.push(precommit); } - // we create an equivocation for the 67th validator targetting blocks #1 and #2. + // we create an equivocation for the 67th validator targeting blocks #1 and #2. // this should be accounted as "voting for all blocks" and therefore block #3 will // have 67/100 votes, reaching finality threshold. { diff --git a/substrate/client/consensus/grandpa/src/until_imported.rs b/substrate/client/consensus/grandpa/src/until_imported.rs index 14f32ecc8836..f3874086b581 100644 --- a/substrate/client/consensus/grandpa/src/until_imported.rs +++ b/substrate/client/consensus/grandpa/src/until_imported.rs @@ -1002,7 +1002,7 @@ mod tests { } #[test] - fn block_global_message_wait_completed_return_none_on_block_number_missmatch() { + fn block_global_message_wait_completed_return_none_on_block_number_mismatch() { let msg_inner = test_catch_up(); let waiting_block_1 = diff --git a/substrate/client/consensus/grandpa/src/voting_rule.rs b/substrate/client/consensus/grandpa/src/voting_rule.rs index e09780739c73..c37596d20f68 100644 --- a/substrate/client/consensus/grandpa/src/voting_rule.rs +++ b/substrate/client/consensus/grandpa/src/voting_rule.rs @@ -196,7 +196,7 @@ where target_header = backend .header(target_hash) .ok()? 
- .expect("Header known to exist due to the existence of one of its descendents; qed"); + .expect("Header known to exist due to the existence of one of its descendants; qed"); } } diff --git a/substrate/client/consensus/grandpa/src/warp_proof.rs b/substrate/client/consensus/grandpa/src/warp_proof.rs index 29111712ec38..7169a424c14a 100644 --- a/substrate/client/consensus/grandpa/src/warp_proof.rs +++ b/substrate/client/consensus/grandpa/src/warp_proof.rs @@ -249,7 +249,7 @@ impl> NetworkProvider: BlockNumberOps, { - /// Create a new istance for a given backend and authority set. + /// Create a new instance for a given backend and authority set. pub fn new( backend: Arc, authority_set: SharedAuthoritySet>, diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml index ac32fed72289..1422d46105b2 100644 --- a/substrate/client/consensus/manual-seal/Cargo.toml +++ b/substrate/client/consensus/manual-seal/Cargo.toml @@ -18,9 +18,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } assert_matches = "1.3.0" -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } diff --git a/substrate/client/consensus/manual-seal/src/consensus/babe.rs b/substrate/client/consensus/manual-seal/src/consensus/babe.rs index 26fa81459808..bc56ce022714 100644 --- a/substrate/client/consensus/manual-seal/src/consensus/babe.rs +++ b/substrate/client/consensus/manual-seal/src/consensus/babe.rs @@ -82,7 +82,7 @@ pub struct BabeVerifier { } impl BabeVerifier { - /// create a nrew verifier + /// create a new verifier pub fn new(epoch_changes: SharedEpochChanges, client: Arc) -> BabeVerifier { BabeVerifier { epoch_changes, client } } diff --git a/substrate/client/consensus/pow/Cargo.toml b/substrate/client/consensus/pow/Cargo.toml index 0791514035b4..ecfa29aa194d 100644 --- a/substrate/client/consensus/pow/Cargo.toml +++ b/substrate/client/consensus/pow/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } parking_lot = "0.12.1" diff --git a/substrate/client/consensus/slots/Cargo.toml b/substrate/client/consensus/slots/Cargo.toml index 75f8b29a2fd7..4ac6ce907137 100644 --- a/substrate/client/consensus/slots/Cargo.toml +++ b/substrate/client/consensus/slots/Cargo.toml @@ -17,9 +17,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } sc-client-api = { path = "../../api" } diff --git a/substrate/client/consensus/slots/src/lib.rs b/substrate/client/consensus/slots/src/lib.rs index 12636aae7a44..d9d792005312 100644 --- a/substrate/client/consensus/slots/src/lib.rs +++ b/substrate/client/consensus/slots/src/lib.rs @@ -555,7 +555,7 @@ impl SlotProportion { Self(inner.clamp(0.0, 1.0)) } - /// Returns the inner that is 
guaranted to be in the range `[0,1]`. + /// Returns the inner that is guaranteed to be in the range `[0,1]`. pub fn get(&self) -> f32 { self.0 } @@ -648,7 +648,7 @@ pub fn proposing_remaining_duration( } /// Calculate a slot duration lenience based on the number of missed slots from current -/// to parent. If the number of skipped slots is greated than 0 this method will apply +/// to parent. If the number of skipped slots is greater than 0 this method will apply /// an exponential backoff of at most `2^7 * slot_duration`, if no slots were skipped /// this method will return `None.` pub fn slot_lenience_exponential( @@ -680,7 +680,7 @@ pub fn slot_lenience_exponential( } /// Calculate a slot duration lenience based on the number of missed slots from current -/// to parent. If the number of skipped slots is greated than 0 this method will apply +/// to parent. If the number of skipped slots is greater than 0 this method will apply /// a linear backoff of at most `20 * slot_duration`, if no slots were skipped /// this method will return `None.` pub fn slot_lenience_linear( diff --git a/substrate/client/db/src/upgrade.rs b/substrate/client/db/src/upgrade.rs index f1e503867dfc..475220d99919 100644 --- a/substrate/client/db/src/upgrade.rs +++ b/substrate/client/db/src/upgrade.rs @@ -79,7 +79,7 @@ impl fmt::Display for UpgradeError { write!(f, "Database version comes from future version of the client: {}", version) }, UpgradeError::DecodingJustificationBlock => { - write!(f, "Decodoning justification block failed") + write!(f, "Decoding justification block failed") }, UpgradeError::Io(err) => write!(f, "Io error: {}", err), } diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs index abf9c4629cee..b532e0d46662 100644 --- a/substrate/client/db/src/utils.rs +++ b/substrate/client/db/src/utils.rs @@ -338,7 +338,7 @@ fn open_kvdb_rocksdb( db_config.memory_budget = memory_budget; let db = kvdb_rocksdb::Database::open(&db_config, path)?; - // write database version only after the database is succesfully opened + // write database version only after the database is successfully opened crate::upgrade::update_version(path)?; Ok(sp_database::as_database(db)) } diff --git a/substrate/client/executor/Cargo.toml b/substrate/client/executor/Cargo.toml index 7fad7e3a2a0e..cb0befe98710 100644 --- a/substrate/client/executor/Cargo.toml +++ b/substrate/client/executor/Cargo.toml @@ -51,7 +51,7 @@ tracing-subscriber = "0.2.19" paste = "1.0" regex = "1.6.0" criterion = "0.4.0" -env_logger = "0.9" +env_logger = "0.11" num_cpus = "1.13.1" tempfile = "3.3.0" diff --git a/substrate/client/executor/common/src/error.rs b/substrate/client/executor/common/src/error.rs index b7c7e2b70198..9d489eaae420 100644 --- a/substrate/client/executor/common/src/error.rs +++ b/substrate/client/executor/common/src/error.rs @@ -145,7 +145,7 @@ pub enum WasmError { #[error("{0}")] Instantiation(String), - /// Other error happenend. + /// Other error happened. #[error("Other error happened while constructing the runtime: {0}")] Other(String), } diff --git a/substrate/client/executor/runtime-test/src/lib.rs b/substrate/client/executor/runtime-test/src/lib.rs index e86b48ad594c..0313c00d9e1d 100644 --- a/substrate/client/executor/runtime-test/src/lib.rs +++ b/substrate/client/executor/runtime-test/src/lib.rs @@ -183,7 +183,7 @@ sp_core::wasm_export_functions! 
{ sig.copy_from_slice(&input[32..96]); let msg = b"all ok!"; - ed25519_verify(&ed25519::Signature(sig), &msg[..], &ed25519::Public(pubkey)) + ed25519_verify(&ed25519::Signature::from(sig), &msg[..], &ed25519::Public::from(pubkey)) } fn test_sr25519_verify(input: Vec<u8>) -> bool { @@ -194,7 +194,7 @@ sp_core::wasm_export_functions! { sig.copy_from_slice(&input[32..96]); let msg = b"all ok!"; - sr25519_verify(&sr25519::Signature(sig), &msg[..], &sr25519::Public(pubkey)) + sr25519_verify(&sr25519::Signature::from(sig), &msg[..], &sr25519::Public::from(pubkey)) } fn test_ordered_trie_root() -> Vec<u8> { diff --git a/substrate/client/executor/wasmtime/Cargo.toml b/substrate/client/executor/wasmtime/Cargo.toml index 75cc76a23543..f3fef4046914 100644 --- a/substrate/client/executor/wasmtime/Cargo.toml +++ b/substrate/client/executor/wasmtime/Cargo.toml @@ -30,7 +30,7 @@ wasmtime = { version = "8.0.1", default-features = false, features = [ "parallel-compilation", "pooling-allocator", ] } -anyhow = "1.0.68" +anyhow = "1.0.81" sc-allocator = { path = "../../allocator" } sc-executor-common = { path = "../common" } sp-runtime-interface = { path = "../../../primitives/runtime-interface" } diff --git a/substrate/client/executor/wasmtime/src/runtime.rs b/substrate/client/executor/wasmtime/src/runtime.rs index 595cc503272e..286d134ecd17 100644 --- a/substrate/client/executor/wasmtime/src/runtime.rs +++ b/substrate/client/executor/wasmtime/src/runtime.rs @@ -462,7 +462,7 @@ pub struct Semantics { pub struct Config { /// The WebAssembly standard requires all imports of an instantiated module to be resolved, /// otherwise, the instantiation fails. If this option is set to `true`, then this behavior is - /// overriden and imports that are requested by the module and not provided by the host + /// overridden and imports that are requested by the module and not provided by the host /// functions will be resolved using stubs. These stubs will trap upon a call. pub allow_missing_func_imports: bool, diff --git a/substrate/client/executor/wasmtime/src/tests.rs b/substrate/client/executor/wasmtime/src/tests.rs index 1c06da1e3c14..f86a42757694 100644 --- a/substrate/client/executor/wasmtime/src/tests.rs +++ b/substrate/client/executor/wasmtime/src/tests.rs @@ -254,7 +254,7 @@ fn test_nan_canonicalization(instantiation_strategy: InstantiationStrategy) { /// A NaN with canonical payload bits. const CANONICAL_NAN_BITS: u32 = 0x7fc00000; - /// A NaN value with an abitrary payload. + /// A NaN value with an arbitrary payload. const ARBITRARY_NAN_BITS: u32 = 0x7f812345; // This test works like this: we essentially do @@ -272,7 +272,7 @@ fn test_nan_canonicalization(instantiation_strategy: InstantiationStrategy) { // However, with the `canonicalize_nans` option turned on above, we expect that the output will // be a canonical NaN. // - // We exterpolate the results of this tests so that we assume that all intermediate computations + // We extrapolate the results of this test so that we assume that all intermediate computations // that involve floats are sanitized and cannot produce a non-deterministic NaN.
let params = (u32::to_le_bytes(ARBITRARY_NAN_BITS), u32::to_le_bytes(1)).encode(); diff --git a/substrate/client/informant/Cargo.toml b/substrate/client/informant/Cargo.toml index bd15e94ebafa..191ef5f19f8d 100644 --- a/substrate/client/informant/Cargo.toml +++ b/substrate/client/informant/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ansi_term = "0.12.1" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } sc-client-api = { path = "../api" } diff --git a/substrate/client/merkle-mountain-range/Cargo.toml b/substrate/client/merkle-mountain-range/Cargo.toml index 60232bccb0e0..46b7a1011c46 100644 --- a/substrate/client/merkle-mountain-range/Cargo.toml +++ b/substrate/client/merkle-mountain-range/Cargo.toml @@ -15,7 +15,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3" +futures = "0.3.30" log = { workspace = true, default-features = true } sp-api = { path = "../../primitives/api" } sp-blockchain = { path = "../../primitives/blockchain" } @@ -32,4 +32,4 @@ parking_lot = "0.12.1" sc-block-builder = { path = "../block-builder" } sp-tracing = { path = "../../primitives/tracing" } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -tokio = "1.17.0" +tokio = "1.37" diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml index 736184f4668c..3beeae9f9b14 100644 --- a/substrate/client/mixnet/Cargo.toml +++ b/substrate/client/mixnet/Cargo.toml @@ -21,7 +21,7 @@ arrayvec = "0.7.2" blake2 = "0.10.4" bytes = "1" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -futures = "0.3.25" +futures = "0.3.30" futures-timer = "3.0.2" libp2p-identity = { version = "0.1.3", features = ["peerid"] } log = { workspace = true, default-features = true } diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml index a14761c0d6e8..346e6bd6a5c6 100644 --- a/substrate/client/network-gossip/Cargo.toml +++ b/substrate/client/network-gossip/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ahash = "0.8.2" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" libp2p = "0.51.4" log = { workspace = true, default-features = true } @@ -31,8 +31,8 @@ sc-network-sync = { path = "../network/sync" } sp-runtime = { path = "../../primitives/runtime" } [dev-dependencies] -tokio = "1.22.0" -async-trait = "0.1.74" +tokio = "1.37" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } quickcheck = { version = "1.0.3", default-features = false } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index cbf74440dc1a..a891336d2417 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -19,17 +19,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" async-channel = "1.8.0" -async-trait = "0.1" +async-trait = "0.1.79" asynchronous-codec = "0.6" bytes = "1" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } either = "1.5.3" fnv = "1.0.6" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" ip_network = "0.4.1" libp2p = { version = "0.51.4", features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", 
"request-response", "tcp", "tokio", "websocket", "yamux"] } -linked_hash_set = "0.1.3" +linked_hash_set = "0.1.4" log = { workspace = true, default-features = true } mockall = "0.11.3" parking_lot = "0.12.1" diff --git a/substrate/client/network/bitswap/Cargo.toml b/substrate/client/network/bitswap/Cargo.toml index 7ef3ea212427..587e2e70867b 100644 --- a/substrate/client/network/bitswap/Cargo.toml +++ b/substrate/client/network/bitswap/Cargo.toml @@ -21,7 +21,7 @@ prost-build = "0.11" [dependencies] async-channel = "1.8.0" cid = "0.9.0" -futures = "0.3.21" +futures = "0.3.30" libp2p-identity = { version = "0.1.3", features = ["peerid"] } log = { workspace = true, default-features = true } prost = "0.12" diff --git a/substrate/client/network/bitswap/src/lib.rs b/substrate/client/network/bitswap/src/lib.rs index 8148fcd4ffe4..f47eaf1ae7c3 100644 --- a/substrate/client/network/bitswap/src/lib.rs +++ b/substrate/client/network/bitswap/src/lib.rs @@ -301,7 +301,7 @@ mod tests { use substrate_test_runtime_client::{self, prelude::*, TestClientBuilder}; #[tokio::test] - async fn undecodeable_message() { + async fn undecodable_message() { let client = substrate_test_runtime_client::new(); let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client)); diff --git a/substrate/client/network/common/Cargo.toml b/substrate/client/network/common/Cargo.toml index 7a6d904b74b1..f9248b0bb51c 100644 --- a/substrate/client/network/common/Cargo.toml +++ b/substrate/client/network/common/Cargo.toml @@ -19,12 +19,12 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.11" [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" bitflags = "1.3.2" codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", ] } -futures = "0.3.21" +futures = "0.3.30" libp2p-identity = { version = "0.1.3", features = ["peerid"] } sc-consensus = { path = "../../consensus/common" } sp-consensus = { path = "../../../primitives/consensus/common" } diff --git a/substrate/client/network/light/Cargo.toml b/substrate/client/network/light/Cargo.toml index c757f727fb71..2628fd07d3e4 100644 --- a/substrate/client/network/light/Cargo.toml +++ b/substrate/client/network/light/Cargo.toml @@ -24,7 +24,7 @@ array-bytes = "6.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", ] } -futures = "0.3.21" +futures = "0.3.30" libp2p-identity = { version = "0.1.3", features = ["peerid"] } log = { workspace = true, default-features = true } prost = "0.12" diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs index 77c26266aac4..4e2121c5540d 100644 --- a/substrate/client/network/src/discovery.rs +++ b/substrate/client/network/src/discovery.rs @@ -72,6 +72,7 @@ use libp2p::{ }, PeerId, }; +use linked_hash_set::LinkedHashSet; use log::{debug, info, trace, warn}; use sp_core::hexdisplay::HexDisplay; use std::{ @@ -550,14 +551,20 @@ impl NetworkBehaviour for DiscoveryBehaviour { ) -> Result, ConnectionDenied> { let Some(peer_id) = maybe_peer else { return Ok(Vec::new()) }; - let mut list = self + // Collect addresses into [`LinkedHashSet`] to eliminate duplicate entries preserving the + // order of addresses. Give priority to `permanent_addresses` (used with reserved nodes) and + // `ephemeral_addresses` (used for addresses discovered from other sources, like authority + // discovery DHT records). 
diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs index 9ad41e376e82..b945d4bfc604 100644 --- a/substrate/client/network/src/protocol/notifications/behaviour.rs +++ b/substrate/client/network/src/protocol/notifications/behaviour.rs @@ -2668,7 +2668,7 @@ mod tests { // // there is not straight-forward way of adding backoff to `PeerState::Disabled` // so manually adjust the value in order to progress on to the next stage. - // This modification together with `ConnectionClosed` will conver the peer + // This modification together with `ConnectionClosed` will convert the peer // state into `PeerState::Backoff`. if let Some(PeerState::Disabled { ref mut backoff_until, .. }) = notif.peers.get_mut(&(peer, set_id)) diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs index 28662be29fee..391252c3ffe7 100644 --- a/substrate/client/network/src/protocol/notifications/handler.rs +++ b/substrate/client/network/src/protocol/notifications/handler.rs @@ -211,7 +211,7 @@ enum State { /// consequently trying to open the various notifications substreams. /// /// A [`NotifsHandlerOut::OpenResultOk`] or a [`NotifsHandlerOut::OpenResultErr`] event must - /// be emitted when transitionning to respectively [`State::Open`] or [`State::Closed`]. + /// be emitted when transitioning to respectively [`State::Open`] or [`State::Closed`]. Opening { /// Substream opened by the remote. If `Some`, has been accepted. in_substream: Option<NotificationsInSubstream<NegotiatedSubstream>>, diff --git a/substrate/client/network/src/protocol/notifications/service/mod.rs b/substrate/client/network/src/protocol/notifications/service/mod.rs index 62e6d88a3d5a..dfb19daa28ef 100644 --- a/substrate/client/network/src/protocol/notifications/service/mod.rs +++ b/substrate/client/network/src/protocol/notifications/service/mod.rs @@ -54,7 +54,7 @@ const COMMAND_QUEUE_SIZE: usize = 64; /// Type representing subscribers of a notification protocol. type Subscribers = Arc<Mutex<Vec<TracingUnboundedSender<NotificationEvent>>>>; -/// Type represending a distributable message sink. +/// Type representing a distributable message sink. /// Detached message sink must carry the protocol name for registering metrics. /// /// See documentation for [`PeerContext`] for more details. @@ -175,11 +175,11 @@ pub enum NotificationCommand { /// and an additional, distributable `NotificationsSink` which the protocol may acquire /// if it wishes to send notifications through `NotificationsSink` directly. /// -/// The distributable `NoticationsSink` is wrapped in an `Arc>` to allow +/// The distributable `NotificationsSink` is wrapped in an `Arc>` to allow /// `NotificationsService` to swap the underlying sink in case it's replaced.
 #[derive(Debug, Clone)]
 struct PeerContext {
-	/// Sink for sending notificaitons.
+	/// Sink for sending notifications.
 	sink: NotificationsSink,
 	/// Distributable notification sink.
@@ -338,7 +338,8 @@ impl NotificationService for NotificationHandle {
 	// Clone [`NotificationService`]
 	fn clone(&mut self) -> Result<Box<dyn NotificationService>, ()> {
 		let mut subscribers = self.subscribers.lock();
-		let (event_tx, event_rx) = tracing_unbounded("mpsc-notification-to-protocol", 100_000);
+
+		let (event_tx, event_rx) = tracing_unbounded(self.rx.name(), 100_000);
 		subscribers.push(event_tx);
 
 		Ok(Box::new(NotificationHandle {
@@ -624,7 +625,9 @@ pub fn notification_service(
 	protocol: ProtocolName,
 ) -> (ProtocolHandlePair, Box<dyn NotificationService>) {
 	let (cmd_tx, cmd_rx) = mpsc::channel(COMMAND_QUEUE_SIZE);
-	let (event_tx, event_rx) = tracing_unbounded("mpsc-notification-to-protocol", 100_000);
+
+	let (event_tx, event_rx) =
+		tracing_unbounded(metric_label_for_protocol(&protocol).leak(), 100_000);
 	let subscribers = Arc::new(Mutex::new(vec![event_tx]));
 
 	(
@@ -632,3 +635,14 @@ pub fn notification_service(
 		Box::new(NotificationHandle::new(protocol.clone(), cmd_tx, event_rx, subscribers)),
 	)
 }
+
+// Decorates the mpsc-notification-to-protocol metric with the name of the protocol,
+// to be able to distinguish between different protocols in dashboards.
+fn metric_label_for_protocol(protocol: &ProtocolName) -> String {
+	let protocol_name = protocol.to_string();
+	let keys = protocol_name.split("/").collect::<Vec<_>>();
+	keys.iter()
+		.rev()
+		.take(2) // Last two tokens give the protocol name and version
+		.fold("mpsc-notification-to-protocol".into(), |acc, val| format!("{}-{}", acc, val))
+}
diff --git a/substrate/client/network/src/protocol/notifications/service/tests.rs b/substrate/client/network/src/protocol/notifications/service/tests.rs
index 02ba9e1711c3..238e0ccf5668 100644
--- a/substrate/client/network/src/protocol/notifications/service/tests.rs
+++ b/substrate/client/network/src/protocol/notifications/service/tests.rs
@@ -437,7 +437,7 @@ async fn peer_disconnects_then_async_notification_is_sent() {
 		notif.send_async_notification(&peer_id, vec![1, 3, 3, 7]).await
 	{
 	} else {
-		panic!("invalid state after calling `send_async_notificatio()` on closed connection")
+		panic!("invalid state after calling `send_async_notification()` on closed connection")
 	}
 }
diff --git a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
index 4e1c033f33b6..85209a888cd9 100644
--- a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
+++ b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
@@ -188,7 +188,7 @@ where
 	}
 }
 
-/// Yielded by the [`NotificationsIn`] after a successfuly upgrade.
+/// Yielded by the [`NotificationsIn`] after a successful upgrade.
 pub struct NotificationsInOpen<TSubstream> {
 	/// Handshake sent by the remote.
 	pub handshake: Vec<u8>,
@@ -415,7 +415,7 @@ where
 	}
 }
 
-/// Yielded by the [`NotificationsOut`] after a successfuly upgrade.
+/// Yielded by the [`NotificationsOut`] after a successful upgrade.
 pub struct NotificationsOutOpen<TSubstream> {
 	/// Handshake returned by the remote.
pub handshake: Vec, diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs index 4c8f119baa20..7f851fd8e9c9 100644 --- a/substrate/client/network/src/protocol_controller.rs +++ b/substrate/client/network/src/protocol_controller.rs @@ -448,7 +448,7 @@ impl ProtocolController { self.peer_store.report_disconnect(peer_id); } - /// Ask `Peerset` if the peer has a reputation value not sufficent for connection with it. + /// Ask `Peerset` if the peer has a reputation value not sufficient for connection with it. fn is_banned(&self, peer_id: &PeerId) -> bool { self.peer_store.is_banned(peer_id) } @@ -2020,7 +2020,7 @@ mod tests { ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); assert!(matches!(controller.reserved_nodes.get(&reserved1), Some(PeerState::NotConnected))); - // Initiate connectios + // Initiate connections controller.alloc_slots(); assert!(matches!(controller.reserved_nodes.get(&reserved1), Some(PeerState::NotConnected))); assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); diff --git a/substrate/client/network/statement/Cargo.toml b/substrate/client/network/statement/Cargo.toml index b6efee5d9d36..635cfc5d0d5e 100644 --- a/substrate/client/network/statement/Cargo.toml +++ b/substrate/client/network/statement/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = "6.1" async-channel = "1.8.0" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" libp2p = "0.51.4" log = { workspace = true, default-features = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index 32ba3b6356c0..6b46d67a3cae 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -21,9 +21,9 @@ prost-build = "0.11" [dependencies] array-bytes = "6.1" async-channel = "1.8.0" -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" libp2p = "0.51.4" log = { workspace = true, default-features = true } diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 24640fdc4557..ff40ae95624e 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -110,7 +110,7 @@ const INACTIVITY_EVICT_THRESHOLD: Duration = Duration::from_secs(30); /// Parachain collator may incorrectly get evicted because it's waiting to receive a number of /// relaychain blocks before it can start creating parachain blocks. During this wait, /// `SyncingEngine` still counts it as active and as the peer is not sending blocks, it may get -/// evicted if a block is not received within the first 30 secons since the peer connected. +/// evicted if a block is not received within the first 30 seconds since the peer connected. /// /// To prevent this from happening, define a threshold for how long `SyncingEngine` should wait /// before it starts evicting peers. @@ -424,7 +424,7 @@ where .expect("Genesis block exists; qed"), ); - // Split warp sync params into warp sync config and a channel to retreive target block + // Split warp sync params into warp sync config and a channel to retrieve target block // header. 
let (warp_sync_config, warp_sync_target_block_header_rx) = warp_sync_params.map_or((None, None), |params| { @@ -1057,7 +1057,7 @@ where // still be under validation. If the peer has different genesis than the // local node the validation fails but the peer cannot be reported in // `validate_connection()` as that is also called by - // `ValiateInboundSubstream` which means that the peer is still being + // `ValidateInboundSubstream` which means that the peer is still being // validated and banning the peer when handling that event would // result in peer getting dropped twice. // diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index dabcf37ae632..610fd7c65606 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -185,7 +185,7 @@ where + Sync + 'static, { - /// Initialize a new syncing startegy. + /// Initialize a new syncing strategy. pub fn new( config: SyncingConfig, client: Arc, @@ -418,7 +418,7 @@ where self.state.is_some() || match self.chain_sync { Some(ref s) => s.status().state.is_major_syncing(), - None => unreachable!("At least one syncing startegy is active; qed"), + None => unreachable!("At least one syncing strategy is active; qed"), } } @@ -429,7 +429,7 @@ where /// Returns the current sync status. pub fn status(&self) -> SyncStatus { - // This function presumes that startegies are executed serially and must be refactored + // This function presumes that strategies are executed serially and must be refactored // once we have parallel strategies. if let Some(ref warp) = self.warp { warp.status() @@ -438,7 +438,7 @@ where } else if let Some(ref chain_sync) = self.chain_sync { chain_sync.status() } else { - unreachable!("At least one syncing startegy is always active; qed") + unreachable!("At least one syncing strategy is always active; qed") } } @@ -518,7 +518,7 @@ where /// Proceed with the next strategy if the active one finished. pub fn proceed_to_next(&mut self) -> Result<(), ClientError> { - // The strategies are switched as `WarpSync` -> `StateStartegy` -> `ChainSync`. + // The strategies are switched as `WarpSync` -> `StateStrategy` -> `ChainSync`. if let Some(ref mut warp) = self.warp { match warp.take_result() { Some(res) => { @@ -569,7 +569,7 @@ where }, } } else if let Some(state) = &self.state { - if state.is_succeded() { + if state.is_succeeded() { info!(target: LOG_TARGET, "State sync is complete, continuing with block sync."); } else { error!(target: LOG_TARGET, "State sync failed. Falling back to full sync."); diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index ad0c75363e78..da04bbbeccc8 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -117,7 +117,7 @@ mod rep { /// Reputation change for peers which send us a block with bad justifications. pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification"); - /// Reputation change when a peer sent us invlid ancestry result. + /// Reputation change when a peer sent us invalid ancestry result. pub const UNKNOWN_ANCESTOR: Rep = Rep::new(-(1 << 16), "DB Error"); /// Peer response data does not have requested bits. @@ -1334,7 +1334,7 @@ where PeerSyncState::DownloadingJustification(_) => { // Peers that were downloading justifications // should be kept in that state. 
-					// We make sure our commmon number is at least something we have.
+					// We make sure our common number is at least something we have.
 					trace!(
 						target: LOG_TARGET,
 						"Keeping peer {} after restart, updating common number from={} => to={} (our best).",
diff --git a/substrate/client/network/sync/src/strategy/chain_sync/test.rs b/substrate/client/network/sync/src/strategy/chain_sync/test.rs
index 127b6862f0e0..cd955113542b 100644
--- a/substrate/client/network/sync/src/strategy/chain_sync/test.rs
+++ b/substrate/client/network/sync/src/strategy/chain_sync/test.rs
@@ -189,7 +189,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() {
 	assert_eq!(sync.peers.get(&peer_id3).unwrap().common_number, 50);
 }
 
-/// Send a block annoucnement for the given `header`.
+/// Send a block announcement for the given `header`.
 fn send_block_announce(header: Header, peer_id: PeerId, sync: &mut ChainSync) {
 	let announce = BlockAnnounce {
 		header: header.clone(),
@@ -278,7 +278,7 @@ fn unwrap_from_block_number(from: FromBlock) -> u64 {
 /// announcement from this node in its sync process. Meaning our common number didn't change. It
 /// is now expected that we start an ancestor search to find the common number.
 #[test]
-fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() {
+fn do_ancestor_search_when_common_block_to_best_queued_gap_is_too_big() {
 	sp_tracing::try_init_simple();
 
 	let blocks = {
@@ -472,7 +472,7 @@ fn can_sync_huge_fork() {
 		let actions = sync.take_actions().collect::<Vec<_>>();
 
 		request = if actions.is_empty() {
-			// We found the ancenstor
+			// We found the ancestor
 			break
 		} else {
 			assert_eq!(actions.len(), 1);
@@ -607,7 +607,7 @@ fn syncs_fork_without_duplicate_requests() {
 		let actions = sync.take_actions().collect::<Vec<_>>();
 
 		request = if actions.is_empty() {
-			// We found the ancenstor
+			// We found the ancestor
 			break
 		} else {
 			assert_eq!(actions.len(), 1);
diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs
index 12d36ff9e01a..6d3b215f7f32 100644
--- a/substrate/client/network/sync/src/strategy/state.rs
+++ b/substrate/client/network/sync/src/strategy/state.rs
@@ -79,7 +79,7 @@ pub struct StateStrategy<B: BlockT> {
 	state_sync: Box<dyn StateSyncProvider<B>>,
 	peers: HashMap<PeerId, Peer<B>>,
 	actions: Vec<StateStrategyAction<B>>,
-	succeded: bool,
+	succeeded: bool,
 }
 
 impl<B: BlockT> StateStrategy<B> {
@@ -110,7 +110,7 @@ impl<B: BlockT> StateStrategy<B> {
 			)),
 			peers,
 			actions: Vec::new(),
-			succeded: false,
+			succeeded: false,
 		}
 	}
@@ -129,7 +129,7 @@ impl<B: BlockT> StateStrategy<B> {
 				})
 				.collect(),
 			actions: Vec::new(),
-			succeded: false,
+			succeeded: false,
 		}
 	}
@@ -260,7 +260,7 @@ impl<B: BlockT> StateStrategy<B> {
 					"Failed to import target block with state: {e:?}."
 				);
 			});
-		self.succeded |= results.into_iter().any(|result| result.is_ok());
+		self.succeeded |= results.into_iter().any(|result| result.is_ok());
 		self.actions.push(StateStrategyAction::Finished);
 	}
 }
@@ -342,10 +342,10 @@ impl<B: BlockT> StateStrategy<B> {
 		std::mem::take(&mut self.actions).into_iter()
 	}
 
-	/// Check if state sync has succeded.
+	/// Check if state sync has succeeded.
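+	/// (Used by `proceed_to_next` to decide between continuing with block sync
+	/// and falling back to full sync.)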
 	#[must_use]
-	pub fn is_succeded(&self) -> bool {
-		self.succeded
+	pub fn is_succeeded(&self) -> bool {
+		self.succeeded
 	}
 }
 
@@ -669,7 +669,7 @@ mod test {
 	}
 
 	#[test]
-	fn succesfully_importing_target_block_finishes_strategy() {
+	fn successfully_importing_target_block_finishes_strategy() {
 		let target_hash = Hash::random();
 		let mut state_sync_provider = MockStateSync::<Block>::new();
 		state_sync_provider.expect_target_hash().return_const(target_hash);
diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs
index 7935b5f29b68..c7b79228efee 100644
--- a/substrate/client/network/sync/src/strategy/warp.rs
+++ b/substrate/client/network/sync/src/strategy/warp.rs
@@ -968,7 +968,7 @@ mod test {
 
 		warp_sync.on_warp_proof_response(&request_peer_id, EncodedProof(Vec::new()));
 
-		// We only interested in alredy generated actions, not new requests.
+		// We are only interested in already generated actions, not new requests.
 		let actions = std::mem::take(&mut warp_sync.actions);
 		assert_eq!(actions.len(), 1);
 		assert!(matches!(
diff --git a/substrate/client/network/sync/src/warp_request_handler.rs b/substrate/client/network/sync/src/warp_request_handler.rs
index 39cf1c5d8067..eda67cac95ff 100644
--- a/substrate/client/network/sync/src/warp_request_handler.rs
+++ b/substrate/client/network/sync/src/warp_request_handler.rs
@@ -57,7 +57,7 @@ pub fn generate_request_response_config<Hash: AsRef<[u8]>>(
 	}
 }
 
-/// Generate the grandpa warp sync protocol name from the genesi hash and fork id.
+/// Generate the grandpa warp sync protocol name from the genesis hash and fork id.
 fn generate_protocol_name<Hash: AsRef<[u8]>>(genesis_hash: Hash, fork_id: Option<&str>) -> String {
 	let genesis_hash = genesis_hash.as_ref();
 	if let Some(fork_id) = fork_id {
diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml
index 4f57287a39cc..56fc89e1b2b9 100644
--- a/substrate/client/network/test/Cargo.toml
+++ b/substrate/client/network/test/Cargo.toml
@@ -16,9 +16,9 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-tokio = "1.22.0"
-async-trait = "0.1.74"
-futures = "0.3.21"
+tokio = "1.37"
+async-trait = "0.1.79"
+futures = "0.3.30"
 futures-timer = "3.0.1"
 libp2p = "0.51.4"
 log = { workspace = true, default-features = true }
diff --git a/substrate/client/network/test/src/sync.rs b/substrate/client/network/test/src/sync.rs
index c025a8262f0e..f1c1b7414303 100644
--- a/substrate/client/network/test/src/sync.rs
+++ b/substrate/client/network/test/src/sync.rs
@@ -749,7 +749,7 @@ async fn sync_blocks_when_block_announce_validator_says_it_is_new_best() {
 	}
 }
 
-/// Waits for some time until the validation is successfull.
+/// Waits for some time until the validation is successful.
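+/// (Illustrative note: validation is deliberately not ready on the first poll,
+/// exercising the deferred-validation path in the sync tests.)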
struct DeferredBlockAnnounceValidator; impl BlockAnnounceValidator for DeferredBlockAnnounceValidator { diff --git a/substrate/client/network/transactions/Cargo.toml b/substrate/client/network/transactions/Cargo.toml index 01c8ac8814d6..0ab7386ef21f 100644 --- a/substrate/client/network/transactions/Cargo.toml +++ b/substrate/client/network/transactions/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" libp2p = "0.51.4" log = { workspace = true, default-features = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml index caa4bb03f40c..a3a3cfaa8fca 100644 --- a/substrate/client/offchain/Cargo.toml +++ b/substrate/client/offchain/Cargo.toml @@ -20,7 +20,7 @@ array-bytes = "6.1" bytes = "1.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } fnv = "1.0.6" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" hyper = { version = "0.14.16", features = ["http2", "stream"] } hyper-rustls = { version = "0.24.0", features = ["http2"] } @@ -46,7 +46,7 @@ log = { workspace = true, default-features = true } [dev-dependencies] lazy_static = "1.4.0" -tokio = "1.22.0" +tokio = "1.37" sc-block-builder = { path = "../block-builder" } sc-client-db = { path = "../db", default-features = true } sc-transaction-pool = { path = "../transaction-pool" } diff --git a/substrate/client/offchain/src/api/http.rs b/substrate/client/offchain/src/api/http.rs index 7ca5e3fd13af..46f573341c57 100644 --- a/substrate/client/offchain/src/api/http.rs +++ b/substrate/client/offchain/src/api/http.rs @@ -604,7 +604,7 @@ enum WorkerToApi { /// because we don't want the `HttpApi` to have to drive the reading. /// Instead, reading an item from the channel will notify the worker task, which will push /// the next item. - /// Can also be used to send an error, in case an error happend on the HTTP socket. After + /// Can also be used to send an error, in case an error happened on the HTTP socket. After /// an error is sent, the channel will close. 
body: mpsc::Receiver>, }, diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml index 1b7af6a4a52f..169714d22453 100644 --- a/substrate/client/rpc-api/Cargo.toml +++ b/substrate/client/rpc-api/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/rpc-servers/Cargo.toml b/substrate/client/rpc-servers/Cargo.toml index 3adc81c57d59..bc21b5b1582f 100644 --- a/substrate/client/rpc-servers/Cargo.toml +++ b/substrate/client/rpc-servers/Cargo.toml @@ -25,5 +25,5 @@ tower-http = { version = "0.4.0", features = ["cors"] } tower = { version = "0.4.13", features = ["util"] } http = "0.2.8" hyper = "0.14.27" -futures = "0.3.29" +futures = "0.3.30" governor = "0.6.0" diff --git a/substrate/client/rpc-servers/src/middleware/metrics.rs b/substrate/client/rpc-servers/src/middleware/metrics.rs index 17849dc0c448..688c3c2a9fc4 100644 --- a/substrate/client/rpc-servers/src/middleware/metrics.rs +++ b/substrate/client/rpc-servers/src/middleware/metrics.rs @@ -182,7 +182,7 @@ impl RpcMetrics { transport_label, req.method_name(), // the label "is_error", so `success` should be regarded as false - // and vice-versa to be registrered correctly. + // and vice-versa to be registered correctly. if rp.is_success() { "false" } else { "true" }, if is_rate_limited { "true" } else { "false" }, ]) diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index c62b3e789d38..e2612d914542 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -34,7 +34,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1" } thiserror = { workspace = true } serde = { workspace = true, default-features = true } hex = "0.4" -futures = "0.3.21" +futures = "0.3.30" parking_lot = "0.12.1" tokio-stream = { version = "0.1.14", features = ["sync"] } tokio = { version = "1.22.0", features = ["sync"] } @@ -44,6 +44,7 @@ futures-util = { version = "0.3.30", default-features = false } rand = "0.8.5" [dev-dependencies] +jsonrpsee = { version = "0.22", features = ["server", "ws-client"] } serde_json = { workspace = true, default-features = true } tokio = { version = "1.22.0", features = ["macros"] } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index 1803ffa3a318..de71ed82a128 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -435,7 +435,7 @@ async fn archive_storage_closest_merkle_value() { /// The core of this test. /// - /// Checks keys that are exact match, keys with descedant and keys that should not return + /// Checks keys that are exact match, keys with descendant and keys that should not return /// values. /// /// Returns (key, merkle value) pairs. @@ -459,7 +459,7 @@ async fn archive_storage_closest_merkle_value() { query_type: StorageQueryType::ClosestDescendantMerkleValue, pagination_start_key: None, }, - // Key with descedent. 
+ // Key with descendant. PaginatedStorageQuery { key: hex_string(b":A"), query_type: StorageQueryType::ClosestDescendantMerkleValue, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/api.rs b/substrate/client/rpc-spec-v2/src/chain_head/api.rs index 00000e1fb277..3851adac2644 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/api.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/api.rs @@ -27,7 +27,7 @@ use crate::{ common::events::StorageQuery, }; use jsonrpsee::{proc_macros::rpc, server::ResponsePayload}; -use sp_rpc::list::ListOrValue; +pub use sp_rpc::list::ListOrValue; #[rpc(client, server)] pub trait ChainHeadApi { @@ -54,8 +54,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_body", blocking)] - fn chain_head_unstable_body( + #[method(name = "chainHead_unstable_body", raw_method)] + async fn chain_head_unstable_body( &self, follow_subscription: String, hash: Hash, @@ -73,8 +73,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_header", blocking)] - fn chain_head_unstable_header( + #[method(name = "chainHead_unstable_header", raw_method)] + async fn chain_head_unstable_header( &self, follow_subscription: String, hash: Hash, @@ -85,8 +85,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_storage", blocking)] - fn chain_head_unstable_storage( + #[method(name = "chainHead_unstable_storage", raw_method)] + async fn chain_head_unstable_storage( &self, follow_subscription: String, hash: Hash, @@ -99,8 +99,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_call", blocking)] - fn chain_head_unstable_call( + #[method(name = "chainHead_unstable_call", raw_method)] + async fn chain_head_unstable_call( &self, follow_subscription: String, hash: Hash, @@ -118,8 +118,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_unpin", blocking)] - fn chain_head_unstable_unpin( + #[method(name = "chainHead_unstable_unpin", raw_method)] + async fn chain_head_unstable_unpin( &self, follow_subscription: String, hash_or_hashes: ListOrValue, @@ -131,8 +131,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_continue", blocking)] - fn chain_head_unstable_continue( + #[method(name = "chainHead_unstable_continue", raw_method)] + async fn chain_head_unstable_continue( &self, follow_subscription: String, operation_id: String, @@ -145,8 +145,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. 
- #[method(name = "chainHead_unstable_stopOperation", blocking)] - fn chain_head_unstable_stop_operation( + #[method(name = "chainHead_unstable_stopOperation", raw_method)] + async fn chain_head_unstable_stop_operation( &self, follow_subscription: String, operation_id: String, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index 2bda22b45239..86d9a726d7be 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -34,10 +34,10 @@ use crate::{ hex_string, SubscriptionTaskExecutor, }; use codec::Encode; -use futures::future::FutureExt; +use futures::{channel::oneshot, future::FutureExt}; use jsonrpsee::{ - core::async_trait, server::ResponsePayload, types::SubscriptionId, MethodResponseFuture, - PendingSubscriptionSink, SubscriptionSink, + core::async_trait, server::ResponsePayload, types::SubscriptionId, ConnectionDetails, + MethodResponseFuture, PendingSubscriptionSink, SubscriptionSink, }; use log::debug; use sc_client_api::{ @@ -62,9 +62,14 @@ pub struct ChainHeadConfig { pub subscription_max_pinned_duration: Duration, /// The maximum number of ongoing operations per subscription. pub subscription_max_ongoing_operations: usize, + /// Stop all subscriptions if the distance between the leaves and the current finalized + /// block is larger than this value. + pub max_lagging_distance: usize, /// The maximum number of items reported by the `chainHead_storage` before /// pagination is required. pub operation_max_storage_items: usize, + /// The maximum number of `chainHead_follow` subscriptions per connection. + pub max_follow_subscriptions_per_connection: usize, } /// Maximum pinned blocks across all connections. @@ -86,13 +91,22 @@ const MAX_ONGOING_OPERATIONS: usize = 16; /// before paginations is required. const MAX_STORAGE_ITER_ITEMS: usize = 5; +/// Stop all subscriptions if the distance between the leaves and the current finalized +/// block is larger than this value. +const MAX_LAGGING_DISTANCE: usize = 128; + +/// The maximum number of `chainHead_follow` subscriptions per connection. +const MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION: usize = 4; + impl Default for ChainHeadConfig { fn default() -> Self { ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: MAX_PINNED_DURATION, subscription_max_ongoing_operations: MAX_ONGOING_OPERATIONS, + max_lagging_distance: MAX_LAGGING_DISTANCE, operation_max_storage_items: MAX_STORAGE_ITER_ITEMS, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, } } } @@ -106,10 +120,13 @@ pub struct ChainHead, Block: BlockT, Client> { /// Executor to spawn subscriptions. executor: SubscriptionTaskExecutor, /// Keep track of the pinned blocks for each subscription. - subscriptions: Arc>, + subscriptions: SubscriptionManagement, /// The maximum number of items reported by the `chainHead_storage` before /// pagination is required. operation_max_storage_items: usize, + /// Stop all subscriptions if the distance between the leaves and the current finalized + /// block is larger than this value. + max_lagging_distance: usize, /// Phantom member to pin the block type. 
_phantom: PhantomData, } @@ -126,13 +143,15 @@ impl, Block: BlockT, Client> ChainHead { client, backend: backend.clone(), executor, - subscriptions: Arc::new(SubscriptionManagement::new( + subscriptions: SubscriptionManagement::new( config.global_max_pinned_blocks, config.subscription_max_pinned_duration, config.subscription_max_ongoing_operations, + config.max_follow_subscriptions_per_connection, backend, - )), + ), operation_max_storage_items: config.operation_max_storage_items, + max_lagging_distance: config.max_lagging_distance, _phantom: PhantomData, } } @@ -180,17 +199,29 @@ where let subscriptions = self.subscriptions.clone(); let backend = self.backend.clone(); let client = self.client.clone(); + let max_lagging_distance = self.max_lagging_distance; let fut = async move { + // Ensure the current connection ID has enough space to accept a new subscription. + let connection_id = pending.connection_id(); + // The RAII `reserved_subscription` will clean up resources on drop: + // - free the reserved subscription for the connection ID. + // - remove the subscription ID from the subscription management. + let Some(mut reserved_subscription) = subscriptions.reserve_subscription(connection_id) + else { + pending.reject(ChainHeadRpcError::ReachedLimits).await; + return + }; + let Ok(sink) = pending.accept().await else { return }; let sub_id = read_subscription_id_as_string(&sink); - // Keep track of the subscription. - let Some(sub_data) = subscriptions.insert_subscription(sub_id.clone(), with_runtime) + let Some(sub_data) = + reserved_subscription.insert_subscription(sub_id.clone(), with_runtime) else { - // Inserting the subscription can only fail if the JsonRPSee - // generated a duplicate subscription ID. + // Inserting the subscription can only fail if the JsonRPSee generated a duplicate + // subscription ID. debug!(target: LOG_TARGET, "[follow][id={:?}] Subscription already accepted", sub_id); let msg = to_sub_message(&sink, &FollowEvent::::Stop); let _ = sink.send(msg).await; @@ -201,91 +232,121 @@ where let mut chain_head_follow = ChainHeadFollower::new( client, backend, - subscriptions.clone(), + subscriptions, with_runtime, sub_id.clone(), + max_lagging_distance, ); + let result = chain_head_follow.generate_events(sink, sub_data).await; + if let Err(SubscriptionManagementError::BlockDistanceTooLarge) = result { + debug!(target: LOG_TARGET, "[follow][id={:?}] All subscriptions are stopped", sub_id); + reserved_subscription.stop_all_subscriptions(); + } - chain_head_follow.generate_events(sink, sub_data).await; - - subscriptions.remove_subscription(&sub_id); debug!(target: LOG_TARGET, "[follow][id={:?}] Subscription removed", sub_id); }; self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); } - fn chain_head_unstable_body( + async fn chain_head_unstable_body( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash: Block::Hash, ) -> ResponsePayload<'static, MethodResponse> { - let mut block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { - Ok(block) => block, - Err(SubscriptionManagementError::SubscriptionAbsent) | - Err(SubscriptionManagementError::ExceededLimits) => - return ResponsePayload::success(MethodResponse::LimitReached), - Err(SubscriptionManagementError::BlockHashAbsent) => { - // Block is not part of the subscription. 
- return ResponsePayload::error(ChainHeadRpcError::InvalidBlock); - }, - Err(_) => return ResponsePayload::error(ChainHeadRpcError::InvalidBlock), - }; + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + // The spec says to return `LimitReached` if the follow subscription is invalid or + // stale. + return ResponsePayload::success(MethodResponse::LimitReached); + } - let operation_id = block_guard.operation().operation_id(); + let client = self.client.clone(); + let subscriptions = self.subscriptions.clone(); + let executor = self.executor.clone(); + + let result = spawn_blocking(&self.executor, async move { + let mut block_guard = match subscriptions.lock_block(&follow_subscription, hash, 1) { + Ok(block) => block, + Err(SubscriptionManagementError::SubscriptionAbsent) | + Err(SubscriptionManagementError::ExceededLimits) => + return ResponsePayload::success(MethodResponse::LimitReached), + Err(SubscriptionManagementError::BlockHashAbsent) => { + // Block is not part of the subscription. + return ResponsePayload::error(ChainHeadRpcError::InvalidBlock); + }, + Err(_) => return ResponsePayload::error(ChainHeadRpcError::InvalidBlock), + }; - let event = match self.client.block(hash) { - Ok(Some(signed_block)) => { - let extrinsics = signed_block - .block - .extrinsics() - .iter() - .map(|extrinsic| hex_string(&extrinsic.encode())) - .collect(); - FollowEvent::::OperationBodyDone(OperationBodyDone { + let operation_id = block_guard.operation().operation_id(); + + let event = match client.block(hash) { + Ok(Some(signed_block)) => { + let extrinsics = signed_block + .block + .extrinsics() + .iter() + .map(|extrinsic| hex_string(&extrinsic.encode())) + .collect(); + FollowEvent::::OperationBodyDone(OperationBodyDone { + operation_id: operation_id.clone(), + value: extrinsics, + }) + }, + Ok(None) => { + // The block's body was pruned. This subscription ID has become invalid. + debug!( + target: LOG_TARGET, + "[body][id={:?}] Stopping subscription because hash={:?} was pruned", + &follow_subscription, + hash + ); + subscriptions.remove_subscription(&follow_subscription); + return ResponsePayload::error(ChainHeadRpcError::InvalidBlock) + }, + Err(error) => FollowEvent::::OperationError(OperationError { operation_id: operation_id.clone(), - value: extrinsics, - }) - }, - Ok(None) => { - // The block's body was pruned. This subscription ID has become invalid. - debug!( - target: LOG_TARGET, - "[body][id={:?}] Stopping subscription because hash={:?} was pruned", - &follow_subscription, - hash - ); - self.subscriptions.remove_subscription(&follow_subscription); - return ResponsePayload::error(ChainHeadRpcError::InvalidBlock) - }, - Err(error) => FollowEvent::::OperationError(OperationError { - operation_id: operation_id.clone(), - error: error.to_string(), - }), - }; + error: error.to_string(), + }), + }; - let (rp, rp_fut) = method_started_response(operation_id, None); + let (rp, rp_fut) = method_started_response(operation_id, None); + let fut = async move { + // Wait for the server to send out the response and if it produces an error no event + // should be generated. + if rp_fut.await.is_err() { + return; + } - let fut = async move { - // Events should only by generated - // if the response was successfully propagated. 
- if rp_fut.await.is_err() { - return; - } - let _ = block_guard.response_sender().unbounded_send(event); - }; + let _ = block_guard.response_sender().unbounded_send(event); + }; + executor.spawn_blocking("substrate-rpc-subscription", Some("rpc"), fut.boxed()); - self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + rp + }); - rp + result + .await + .unwrap_or_else(|_| ResponsePayload::success(MethodResponse::LimitReached)) } - fn chain_head_unstable_header( + async fn chain_head_unstable_header( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash: Block::Hash, ) -> Result, ChainHeadRpcError> { - let _block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + return Ok(None); + } + + let block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { Ok(block) => block, Err(SubscriptionManagementError::SubscriptionAbsent) | Err(SubscriptionManagementError::ExceededLimits) => return Ok(None), @@ -296,19 +357,35 @@ where Err(_) => return Err(ChainHeadRpcError::InvalidBlock.into()), }; - self.client - .header(hash) - .map(|opt_header| opt_header.map(|h| hex_string(&h.encode()))) - .map_err(|err| ChainHeadRpcError::InternalError(err.to_string())) + let client = self.client.clone(); + let result = spawn_blocking(&self.executor, async move { + let _block_guard = block_guard; + + client + .header(hash) + .map(|opt_header| opt_header.map(|h| hex_string(&h.encode()))) + .map_err(|err| ChainHeadRpcError::InternalError(err.to_string())) + }); + result.await.unwrap_or_else(|_| Ok(None)) } - fn chain_head_unstable_storage( + async fn chain_head_unstable_storage( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash: Block::Hash, items: Vec>, child_trie: Option, ) -> ResponsePayload<'static, MethodResponse> { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + // The spec says to return `LimitReached` if the follow subscription is invalid or + // stale. + return ResponsePayload::success(MethodResponse::LimitReached); + } + // Gain control over parameter parsing and returned error. let items = match items .into_iter() @@ -357,25 +434,25 @@ where let mut items = items; items.truncate(num_operations); - let (rp, rp_is_success) = method_started_response(operation_id, Some(discarded)); - + let (rp, rp_fut) = method_started_response(operation_id, Some(discarded)); let fut = async move { - // Events should only by generated - // if the response was successfully propagated. - if rp_is_success.await.is_err() { + // Wait for the server to send out the response and if it produces an error no event + // should be generated. + if rp_fut.await.is_err() { return; } + storage_client.generate_events(block_guard, hash, items, child_trie).await; }; - self.executor .spawn_blocking("substrate-rpc-subscription", Some("rpc"), fut.boxed()); rp } - fn chain_head_unstable_call( + async fn chain_head_unstable_call( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash: Block::Hash, function: String, @@ -386,6 +463,15 @@ where Err(err) => return ResponsePayload::error(err), }; + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + // The spec says to return `LimitReached` if the follow subscription is invalid or + // stale. 
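+			// (A subscription is stale, for instance, when its ID was issued on a
+			// different connection than the one making this call.)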
+ return ResponsePayload::success(MethodResponse::LimitReached); + } + let mut block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { Ok(block) => block, Err(SubscriptionManagementError::SubscriptionAbsent) | @@ -408,44 +494,53 @@ where } let operation_id = block_guard.operation().operation_id(); - let event = self - .client - .executor() - .call(hash, &function, &call_parameters, CallContext::Offchain) - .map(|result| { - FollowEvent::::OperationCallDone(OperationCallDone { - operation_id: operation_id.clone(), - output: hex_string(&result), - }) - }) - .unwrap_or_else(|error| { - FollowEvent::::OperationError(OperationError { - operation_id: operation_id.clone(), - error: error.to_string(), - }) - }); - - let (rp, rp_fut) = method_started_response(operation_id, None); + let client = self.client.clone(); + let (rp, rp_fut) = method_started_response(operation_id.clone(), None); let fut = async move { - // Events should only by generated - // if the response was successfully propagated. + // Wait for the server to send out the response and if it produces an error no event + // should be generated. if rp_fut.await.is_err() { - return; + return } + + let event = client + .executor() + .call(hash, &function, &call_parameters, CallContext::Offchain) + .map(|result| { + FollowEvent::::OperationCallDone(OperationCallDone { + operation_id: operation_id.clone(), + output: hex_string(&result), + }) + }) + .unwrap_or_else(|error| { + FollowEvent::::OperationError(OperationError { + operation_id: operation_id.clone(), + error: error.to_string(), + }) + }); + let _ = block_guard.response_sender().unbounded_send(event); }; - - self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + self.executor + .spawn_blocking("substrate-rpc-subscription", Some("rpc"), fut.boxed()); rp } - fn chain_head_unstable_unpin( + async fn chain_head_unstable_unpin( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash_or_hashes: ListOrValue, ) -> Result<(), ChainHeadRpcError> { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + return Ok(()); + } + let result = match hash_or_hashes { ListOrValue::Value(hash) => self.subscriptions.unpin_blocks(&follow_subscription, [hash]), @@ -469,11 +564,19 @@ where } } - fn chain_head_unstable_continue( + async fn chain_head_unstable_continue( &self, + connection_details: ConnectionDetails, follow_subscription: String, operation_id: String, ) -> Result<(), ChainHeadRpcError> { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + return Ok(()) + } + let Some(operation) = self.subscriptions.get_operation(&follow_subscription, &operation_id) else { return Ok(()) @@ -487,11 +590,19 @@ where } } - fn chain_head_unstable_stop_operation( + async fn chain_head_unstable_stop_operation( &self, + connection_details: ConnectionDetails, follow_subscription: String, operation_id: String, ) -> Result<(), ChainHeadRpcError> { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + return Ok(()) + } + let Some(operation) = self.subscriptions.get_operation(&follow_subscription, &operation_id) else { return Ok(()) @@ -510,3 +621,26 @@ fn method_started_response( let rp = MethodResponse::Started(MethodResponseStarted { operation_id, discarded_items }); ResponsePayload::success(rp).notify_on_completion() } + +/// Spawn a blocking future on the provided executor and return the 
result on a oneshot channel.
+///
+/// This is a wrapper to extract the result of an `executor.spawn_blocking` future.
+fn spawn_blocking<R>(
+	executor: &SubscriptionTaskExecutor,
+	fut: impl std::future::Future<Output = R> + Send + 'static,
+) -> oneshot::Receiver<R>
+where
+	R: Send + 'static,
+{
+	let (tx, rx) = oneshot::channel();
+
+	let blocking_fut = async move {
+		let result = fut.await;
+		// Send the result back on the channel.
+		let _ = tx.send(result);
+	};
+
+	executor.spawn_blocking("substrate-rpc-subscription", Some("rpc"), blocking_fut.boxed());
+
+	rx
+}
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs
index afa99f3aa164..0d87a45c07e2 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs
@@ -41,12 +41,14 @@ use sp_api::CallApiAt;
 use sp_blockchain::{
 	Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, Info,
 };
-use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor};
+use sp_runtime::{
+	traits::{Block as BlockT, Header as HeaderT, NumberFor},
+	SaturatedConversion, Saturating,
+};
 use std::{
 	collections::{HashSet, VecDeque},
 	sync::Arc,
 };
-
 /// The maximum number of finalized blocks provided by the
 /// `Initialized` event.
 const MAX_FINALIZED_BLOCKS: usize = 16;
@@ -60,13 +62,16 @@ pub struct ChainHeadFollower<BE: Backend<Block>, Block: BlockT, Client> {
 	/// Backend of the chain.
 	backend: Arc<BE>,
 	/// Subscriptions handle.
-	sub_handle: Arc<SubscriptionManagement<Block, BE>>,
+	sub_handle: SubscriptionManagement<Block, BE>,
 	/// Subscription was started with the runtime updates flag.
 	with_runtime: bool,
 	/// Subscription ID.
 	sub_id: String,
 	/// The best reported block by this subscription.
 	best_block_cache: Option<Block::Hash>,
+	/// Stop all subscriptions if the distance between the leaves and the current finalized
+	/// block is larger than this value.
+	max_lagging_distance: usize,
 }
 
 impl<BE: Backend<Block>, Block: BlockT, Client> ChainHeadFollower<BE, Block, Client> {
@@ -74,11 +79,20 @@ impl<BE: Backend<Block>, Block: BlockT, Client> ChainHeadFollower<BE, Block, Client>
 		client: Arc<Client>,
 		backend: Arc<BE>,
-		sub_handle: Arc<SubscriptionManagement<Block, BE>>,
+		sub_handle: SubscriptionManagement<Block, BE>,
 		with_runtime: bool,
 		sub_id: String,
+		max_lagging_distance: usize,
 	) -> Self {
-		Self { client, backend, sub_handle, with_runtime, sub_id, best_block_cache: None }
+		Self {
+			client,
+			backend,
+			sub_handle,
+			with_runtime,
+			sub_id,
+			best_block_cache: None,
+			max_lagging_distance,
+		}
 	}
 }
 
@@ -186,6 +200,35 @@ where
 		}
 	}
 
+	/// Check that the distance between the provided blocks does not exceed a
+	/// reasonable range.
+	///
+	/// When the blocks are too far apart (potentially millions of blocks):
+	/// - Tree route is expensive to calculate.
+	/// - The RPC layer will not be able to generate the `NewBlock` events for all blocks.
+	///
+	/// This edge case can happen for parachains, where the relay chain syncs to the head
+	/// of the chain more slowly than a parachain node that is already synced.
+	fn distance_within_reason(
+		&self,
+		block: Block::Hash,
+		finalized: Block::Hash,
+	) -> Result<(), SubscriptionManagementError> {
+		let Some(block_num) = self.client.number(block)? else {
+			return Err(SubscriptionManagementError::BlockHashAbsent)
+		};
+		let Some(finalized_num) = self.client.number(finalized)? else {
+			return Err(SubscriptionManagementError::BlockHashAbsent)
+		};
+
+		let distance: usize = block_num.saturating_sub(finalized_num).saturated_into();
+		if distance > self.max_lagging_distance {
+			return Err(SubscriptionManagementError::BlockDistanceTooLarge);
+		}
+
+		Ok(())
+	}
+
 	/// Get the in-memory blocks of the client, starting from the provided finalized hash.
 	///
 	/// The reported blocks are pinned by this function.
@@ -198,6 +241,13 @@ where
 		let mut pruned_forks = HashSet::new();
 		let mut finalized_block_descendants = Vec::new();
 		let mut unique_descendants = HashSet::new();
+
+		// Ensure all leaves are within a reasonable distance from the finalized block
+		// before traversing the tree.
+		for leaf in &leaves {
+			self.distance_within_reason(*leaf, finalized)?;
+		}
+
 		for leaf in leaves {
 			let tree_route = sp_blockchain::tree_route(blockchain, finalized, leaf)?;
 
@@ -542,11 +592,17 @@ where
 		mut to_ignore: HashSet<Block::Hash>,
 		sink: SubscriptionSink,
 		rx_stop: oneshot::Receiver<()>,
-	) where
+	) -> Result<(), SubscriptionManagementError>
+	where
 		EventStream: Stream<Item = NotificationType<Block>> + Unpin,
 	{
 		let mut stream_item = stream.next();
-		let mut stop_event = rx_stop;
+
+		// The stop event can be triggered by the chainHead logic when the pinned
+		// block guarantee cannot be held, or when the client is disconnected.
+		let connection_closed = sink.closed();
+		tokio::pin!(connection_closed);
+		let mut stop_event = futures_util::future::select(rx_stop, connection_closed);
 
 		while let Either::Left((Some(event), next_stop_event)) =
 			futures_util::future::select(stream_item, stop_event).await
@@ -571,7 +627,7 @@ where
 					);
 					let msg = to_sub_message(&sink, &FollowEvent::<String>::Stop);
 					let _ = sink.send(msg).await;
-					return
+					return Err(err)
 				},
 			};
 
@@ -586,7 +642,8 @@ where
 					let msg = to_sub_message(&sink, &FollowEvent::<String>::Stop);
 					let _ = sink.send(msg).await;
 
-					return
+					// No need to propagate this error further, the client disconnected.
+					return Ok(())
 				}
 			}
 
@@ -594,10 +651,13 @@ where
 			stop_event = next_stop_event;
 		}
 
-		// If we got here either the substrate streams have closed
-		// or the `Stop` receiver was triggered.
+		// If we got here either:
+		// - the substrate streams have closed
+		// - the `Stop` receiver was triggered internally (cannot hold the pinned block guarantee)
+		// - the client disconnected.
 		let msg = to_sub_message(&sink, &FollowEvent::<String>::Stop);
 		let _ = sink.send(msg).await;
+		Ok(())
 	}
 
 	/// Generate the block events for the `chainHead_follow` method.
@@ -605,7 +665,7 @@ where
 		&mut self,
 		sink: SubscriptionSink,
 		sub_data: InsertedSubscriptionData<Block>,
-	) {
+	) -> Result<(), SubscriptionManagementError> {
 		// Register for the new block and finalized notifications.
 		let stream_import = self
 			.client
@@ -633,7 +693,7 @@ where
 				);
 				let msg = to_sub_message(&sink, &FollowEvent::<String>::Stop);
 				let _ = sink.send(msg).await;
-				return
+				return Err(err)
 			},
 		};
 
@@ -643,6 +703,6 @@ where
 		let stream = stream::once(futures::future::ready(initial)).chain(merged);
 
 		self.submit_events(&startup_point, stream.boxed(), pruned_forks, sink, sub_data.rx_stop)
-			.await;
+			.await
 	}
 }
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/error.rs b/substrate/client/rpc-spec-v2/src/chain_head/error.rs
index 8c50e445aa0c..35604db06600 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/error.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/error.rs
@@ -23,6 +23,9 @@ use jsonrpsee::types::error::ErrorObject;
 /// ChainHead RPC errors.
 #[derive(Debug, thiserror::Error)]
 pub enum Error {
+	/// The maximum number of `chainHead_follow` subscriptions has been reached.
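+	/// (Reported to clients as JSON-RPC error code `-32800`; see
+	/// `rpc_spec_v2::REACHED_LIMITS` below.)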
+ #[error("Maximum number of chainHead_follow has been reached")] + ReachedLimits, /// The provided block hash is invalid. #[error("Invalid block hash")] InvalidBlock, @@ -46,6 +49,8 @@ pub enum Error { /// Errors for `chainHead` RPC module, as defined in /// . pub mod rpc_spec_v2 { + /// Maximum number of chainHead_follow has been reached. + pub const REACHED_LIMITS: i32 = -32800; /// The provided block hash is invalid. pub const INVALID_BLOCK_ERROR: i32 = -32801; /// The follow subscription was started with `withRuntime` set to `false`. @@ -70,6 +75,8 @@ impl From for ErrorObject<'static> { let msg = e.to_string(); match e { + Error::ReachedLimits => + ErrorObject::owned(rpc_spec_v2::REACHED_LIMITS, msg, None::<()>), Error::InvalidBlock => ErrorObject::owned(rpc_spec_v2::INVALID_BLOCK_ERROR, msg, None::<()>), Error::InvalidRuntimeCall(_) => diff --git a/substrate/client/rpc-spec-v2/src/chain_head/event.rs b/substrate/client/rpc-spec-v2/src/chain_head/event.rs index e0c804d16eb1..bd9863060910 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/event.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/event.rs @@ -111,7 +111,7 @@ impl From for RuntimeEvent { #[derive(Debug, Clone, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Initialized { - /// The hash of the lastest finalized blocks. + /// The hash of the latest finalized blocks. pub finalized_block_hashes: Vec, /// The runtime version of the finalized block. /// @@ -315,7 +315,7 @@ pub enum FollowEvent { Stop, } -/// The method respose of `chainHead_body`, `chainHead_call` and `chainHead_storage`. +/// The method response of `chainHead_body`, `chainHead_call` and `chainHead_storage`. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(tag = "result")] diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/error.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/error.rs index 2c22e51ca4dc..91ce26db22a5 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/error.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/error.rs @@ -41,6 +41,9 @@ pub enum SubscriptionManagementError { /// The unpin method was called with duplicate hashes. #[error("Duplicate hashes")] DuplicateHashes, + /// The distance between the leaves and the current finalized block is too large. + #[error("Distance too large")] + BlockDistanceTooLarge, /// Custom error. #[error("Subscription error {0}")] Custom(String), @@ -57,6 +60,7 @@ impl PartialEq for SubscriptionManagementError { (Self::BlockHeaderAbsent, Self::BlockHeaderAbsent) | (Self::SubscriptionAbsent, Self::SubscriptionAbsent) | (Self::DuplicateHashes, Self::DuplicateHashes) => true, + (Self::BlockDistanceTooLarge, Self::BlockDistanceTooLarge) => true, (Self::Custom(lhs), Self::Custom(rhs)) => lhs == rhs, _ => false, } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index d2879679501f..0e5ccb91d39a 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -560,6 +560,7 @@ pub struct SubscriptionsInner> { max_ongoing_operations: usize, /// Map the subscription ID to internal details of the subscription. subs: HashMap>, + /// Backend pinning / unpinning blocks. /// /// The `Arc` is handled one level-above, but substrate exposes the backend as Arc. 
@@ -623,6 +624,15 @@ impl> SubscriptionsInner { } } + /// All active subscriptions are removed. + pub fn stop_all_subscriptions(&mut self) { + let to_remove: Vec<_> = self.subs.keys().map(|sub_id| sub_id.clone()).collect(); + + for sub_id in to_remove { + self.remove_subscription(&sub_id); + } + } + /// Ensure that a new block could be pinned. /// /// If the global number of blocks has been reached this method @@ -878,6 +888,30 @@ mod tests { (backend, client) } + fn produce_blocks( + mut client: Arc>>, + num_blocks: usize, + ) -> Vec<::Hash> { + let mut blocks = Vec::with_capacity(num_blocks); + let mut parent_hash = client.chain_info().genesis_hash; + + for i in 0..num_blocks { + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .with_parent_block_number(i as u64) + .build() + .unwrap() + .build() + .unwrap() + .block; + parent_hash = block.header.hash(); + futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + blocks.push(block.header.hash()); + } + + blocks + } + #[test] fn block_state_machine_register_unpin() { let mut state = BlockStateMachine::new(); @@ -1003,37 +1037,10 @@ mod tests { #[test] fn unpin_duplicate_hashes() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_1 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_1) - .with_parent_block_number(1) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_2 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_2) - .with_parent_block_number(2) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_3 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); @@ -1102,18 +1109,10 @@ mod tests { #[test] fn subscription_check_block() { - let (backend, mut client) = init_backend(); - - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash = block.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 1); + let hash = hashes[0]; let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); @@ -1140,17 +1139,10 @@ mod tests { #[test] fn subscription_ref_count() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 
1); + let hash = hashes[0]; let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); @@ -1190,37 +1182,10 @@ mod tests { #[test] fn subscription_remove_subscription() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_1 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_1) - .with_parent_block_number(1) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_2 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_2) - .with_parent_block_number(2) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_3 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); @@ -1256,37 +1221,10 @@ mod tests { #[test] fn subscription_check_limits() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_1 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_1) - .with_parent_block_number(1) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_2 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_2) - .with_parent_block_number(2) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_3 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); // Maximum number of pinned blocks is 2. 
let mut subs = @@ -1328,37 +1266,10 @@ mod tests { #[test] fn subscription_check_limits_with_duration() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_1 = block.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_1) - .with_parent_block_number(1) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_2 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_2) - .with_parent_block_number(2) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_3 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); // Maximum number of pinned blocks is 2 and maximum pin duration is 5 second. let mut subs = @@ -1455,4 +1366,90 @@ mod tests { let permit_three = ops.reserve_at_most(1).unwrap(); assert_eq!(permit_three.num_ops, 1); } + + #[test] + fn stop_all_subscriptions() { + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); + + let mut subs = + SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); + let id_1 = "abc".to_string(); + let id_2 = "abcd".to_string(); + + // Pin all blocks for the first subscription. + let _stop = subs.insert_subscription(id_1.clone(), true).unwrap(); + assert_eq!(subs.pin_block(&id_1, hash_1).unwrap(), true); + assert_eq!(subs.pin_block(&id_1, hash_2).unwrap(), true); + assert_eq!(subs.pin_block(&id_1, hash_3).unwrap(), true); + + // Pin only block 2 for the second subscription. + let _stop = subs.insert_subscription(id_2.clone(), true).unwrap(); + assert_eq!(subs.pin_block(&id_2, hash_2).unwrap(), true); + + // Check reference count. + assert_eq!(*subs.global_blocks.get(&hash_1).unwrap(), 1); + assert_eq!(*subs.global_blocks.get(&hash_2).unwrap(), 2); + assert_eq!(*subs.global_blocks.get(&hash_3).unwrap(), 1); + assert_eq!(subs.global_blocks.len(), 3); + + // Stop all active subscriptions. + subs.stop_all_subscriptions(); + assert!(subs.global_blocks.is_empty()); + } + + #[test] + fn reserved_subscription_cleans_resources() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let subs = Arc::new(parking_lot::RwLock::new(SubscriptionsInner::new( + 10, + Duration::from_secs(10), + MAX_OPERATIONS_PER_SUB, + backend, + ))); + + // Maximum 2 subscriptions per connection. + let rpc_connections = crate::common::connections::RpcConnections::new(2); + + let subscription_management = + crate::chain_head::subscription::SubscriptionManagement::_from_inner( + subs.clone(), + rpc_connections.clone(), + ); + + let reserved_sub_first = subscription_management.reserve_subscription(1).unwrap(); + let mut reserved_sub_second = subscription_management.reserve_subscription(1).unwrap(); + // Subscriptions reserved but not yet populated. + assert_eq!(subs.read().subs.len(), 0); + + // Cannot reserve anymore. 
+ assert!(subscription_management.reserve_subscription(1).is_none()); + // Drop the first subscription. + drop(reserved_sub_first); + // Space is freed up for the rpc connections. + let mut reserved_sub_first = subscription_management.reserve_subscription(1).unwrap(); + + // Insert subscriptions. + let _sub_data_first = + reserved_sub_first.insert_subscription("sub1".to_string(), true).unwrap(); + let _sub_data_second = + reserved_sub_second.insert_subscription("sub2".to_string(), true).unwrap(); + // Check we have 2 subscriptions under management. + assert_eq!(subs.read().subs.len(), 2); + + // Drop first reserved subscription. + drop(reserved_sub_first); + // Check that the subscription is removed. + assert_eq!(subs.read().subs.len(), 1); + // Space is freed up for the rpc connections. + let reserved_sub_first = subscription_management.reserve_subscription(1).unwrap(); + + // Drop all subscriptions. + drop(reserved_sub_first); + drop(reserved_sub_second); + assert_eq!(subs.read().subs.len(), 0); + } } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs index c830e662da2e..f266c9d8b34f 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs @@ -16,6 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use jsonrpsee::ConnectionId; use parking_lot::RwLock; use sc_client_api::Backend; use sp_runtime::traits::Block as BlockT; @@ -24,6 +25,11 @@ use std::{sync::Arc, time::Duration}; mod error; mod inner; +use crate::{ + chain_head::chain_head::LOG_TARGET, + common::connections::{RegisteredConnection, ReservedConnection, RpcConnections}, +}; + use self::inner::SubscriptionsInner; pub use self::inner::OperationState; @@ -34,7 +40,22 @@ pub use inner::{BlockGuard, InsertedSubscriptionData}; pub struct SubscriptionManagement> { /// Manage subscription by mapping the subscription ID /// to a set of block hashes. - inner: RwLock>, + inner: Arc>>, + + /// Ensures that chainHead methods can be called from a single connection context. + /// + /// For example, `chainHead_storage` cannot be called with a subscription ID that + /// was obtained from a different connection. + rpc_connections: RpcConnections, +} + +impl> Clone for SubscriptionManagement { + fn clone(&self) -> Self { + SubscriptionManagement { + inner: self.inner.clone(), + rpc_connections: self.rpc_connections.clone(), + } + } } impl> SubscriptionManagement { @@ -43,30 +64,55 @@ impl> SubscriptionManagement { global_max_pinned_blocks: usize, local_max_pin_duration: Duration, max_ongoing_operations: usize, + max_follow_subscriptions_per_connection: usize, backend: Arc, ) -> Self { SubscriptionManagement { - inner: RwLock::new(SubscriptionsInner::new( + inner: Arc::new(RwLock::new(SubscriptionsInner::new( global_max_pinned_blocks, local_max_pin_duration, max_ongoing_operations, backend, - )), + ))), + rpc_connections: RpcConnections::new(max_follow_subscriptions_per_connection), } } - /// Insert a new subscription ID. + /// Create a new instance from the inner state. /// - /// If the subscription was not previously inserted, returns the receiver that is - /// triggered upon the "Stop" event. Otherwise, if the subscription ID was already - /// inserted returns none. - pub fn insert_subscription( + /// # Note + /// + /// Used for testing.
+ #[cfg(test)] + pub(crate) fn _from_inner( + inner: Arc>>, + rpc_connections: RpcConnections, + ) -> Self { + SubscriptionManagement { inner, rpc_connections } + } + + /// Reserve space for a subscription. + /// + /// Fails if the connection ID has reached the maximum number of active subscriptions. + pub fn reserve_subscription( &self, - sub_id: String, - runtime_updates: bool, - ) -> Option> { - let mut inner = self.inner.write(); - inner.insert_subscription(sub_id, runtime_updates) + connection_id: ConnectionId, + ) -> Option> { + let reserved_token = self.rpc_connections.reserve_space(connection_id)?; + + Some(ReservedSubscription { + state: ConnectionState::Reserved(reserved_token), + inner: self.inner.clone(), + }) + } + + /// Check if the given connection contains the given subscription. + pub fn contains_subscription( + &self, + connection_id: ConnectionId, + subscription_id: &str, + ) -> bool { + self.rpc_connections.contains_identifier(connection_id, subscription_id) } /// Remove the subscription ID with associated pinned blocks. @@ -136,3 +182,72 @@ impl> SubscriptionManagement { inner.get_operation(sub_id, operation_id) } } + +/// The state of the connection. +/// +/// The state starts in a [`ConnectionState::Reserved`] state and then transitions to +/// [`ConnectionState::Registered`] when the subscription is inserted. +enum ConnectionState { + Reserved(ReservedConnection), + Registered { _unregister_on_drop: RegisteredConnection, sub_id: String }, + Empty, +} + +/// RAII wrapper that removes the subscription from internal mappings and +/// gives back the reserved space for the connection. +pub struct ReservedSubscription> { + state: ConnectionState, + inner: Arc>>, +} + +impl> ReservedSubscription { + /// Insert a new subscription ID. + /// + /// If the subscription was not previously inserted, returns the receiver that is + /// triggered upon the "Stop" event. Otherwise, if the subscription ID was already + /// inserted, returns none. + /// + /// # Note + /// + /// This method should be called only once. + pub fn insert_subscription( + &mut self, + sub_id: String, + runtime_updates: bool, + ) -> Option> { + match std::mem::replace(&mut self.state, ConnectionState::Empty) { + ConnectionState::Reserved(reserved) => { + let registered_token = reserved.register(sub_id.clone())?; + self.state = ConnectionState::Registered { + _unregister_on_drop: registered_token, + sub_id: sub_id.clone(), + }; + + let mut inner = self.inner.write(); + inner.insert_subscription(sub_id, runtime_updates) + }, + // Cannot insert multiple subscriptions into a single reserved space. + ConnectionState::Registered { .. } | ConnectionState::Empty => { + log::error!(target: LOG_TARGET, "Called insert_subscription on a connection that is not reserved"); + None + }, + } + } + + /// Stop all active subscriptions. + /// + /// For all active subscriptions, the internal data is discarded, blocks are unpinned and the + /// `Stop` event will be generated. + pub fn stop_all_subscriptions(&self) { + let mut inner = self.inner.write(); + inner.stop_all_subscriptions() + } +} + +impl> Drop for ReservedSubscription { + fn drop(&mut self) { + if let ConnectionState::Registered { sub_id, ..
} = &self.state { + self.inner.write().remove_subscription(sub_id); + } + } +} diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 4d9dfb24e0a9..c2bff7c50d5e 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use crate::{ - chain_head::{event::MethodResponse, test_utils::ChainHeadMockClient}, + chain_head::{api::ChainHeadApiClient, event::MethodResponse, test_utils::ChainHeadMockClient}, common::events::{StorageQuery, StorageQueryType, StorageResultType}, hex_string, }; @@ -27,8 +27,12 @@ use assert_matches::assert_matches; use codec::{Decode, Encode}; use futures::Future; use jsonrpsee::{ - core::server::Subscription as RpcSubscription, rpc_params, MethodsError as Error, RpcModule, + core::{ + client::Subscription as RpcClientSubscription, server::Subscription as RpcSubscription, + }, + rpc_params, MethodsError as Error, RpcModule, }; + use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; use sc_service::client::new_in_mem; @@ -59,6 +63,9 @@ const MAX_PINNED_BLOCKS: usize = 32; const MAX_PINNED_SECS: u64 = 60; const MAX_OPERATIONS: usize = 16; const MAX_PAGINATION_LIMIT: usize = 5; +const MAX_LAGGING_DISTANCE: usize = 128; +const MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION: usize = 4; + const INVALID_HASH: [u8; 32] = [1; 32]; const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; @@ -66,6 +73,36 @@ const CHILD_STORAGE_KEY: &[u8] = b"child"; const CHILD_VALUE: &[u8] = b"child value"; const DOES_NOT_PRODUCE_EVENTS_SECONDS: u64 = 10; +/// Start an RPC server with the chainHead module. +pub async fn run_server() -> std::net::SocketAddr { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let client = Arc::new(builder.build()); + + let api = ChainHead::new( + client, + backend, + Arc::new(TaskExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: 1, + max_lagging_distance: MAX_LAGGING_DISTANCE, + }, + ) + .into_rpc(); + + let server = jsonrpsee::server::ServerBuilder::default().build("127.0.0.1:0").await.unwrap(); + + let addr = server.local_addr().unwrap(); + let handle = server.start(api); + + tokio::spawn(handle.stopped()); + addr +} + async fn get_next_event(sub: &mut RpcSubscription) -> T { let (event, _sub_id) = tokio::time::timeout(std::time::Duration::from_secs(60), sub.next()) .await @@ -113,6 +150,8 @@ async fn setup_api() -> ( subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -163,6 +202,9 @@ async fn follow_subscription_produces_blocks() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -231,6 +273,9 @@ async fn 
follow_with_runtime() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -543,6 +588,9 @@ async fn call_runtime_without_flag() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1201,6 +1249,9 @@ async fn separate_operation_ids_for_subscriptions() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1289,6 +1340,9 @@ async fn follow_generates_initial_blocks() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1444,6 +1498,9 @@ async fn follow_exceeding_pinned_blocks() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1520,6 +1577,9 @@ async fn follow_with_unpin() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1631,6 +1691,9 @@ async fn unpin_duplicate_hashes() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1733,6 +1796,9 @@ async fn follow_with_multiple_unpin_hashes() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1886,6 +1952,9 @@ async fn follow_prune_best_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2071,6 +2140,9 @@ async fn follow_forks_pruned_block() { 
subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2230,6 +2302,9 @@ async fn follow_report_multiple_pruned_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2475,6 +2550,9 @@ async fn pin_block_references() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2612,6 +2690,9 @@ async fn follow_finalized_before_new_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2726,6 +2807,9 @@ async fn ensure_operation_limits_works() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: 1, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2786,7 +2870,7 @@ async fn ensure_operation_limits_works() { FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id ); - // The storage is finished and capactiy must be released. + // The storage is finished and capacity must be released. let alice_id = AccountKeyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. let call_parameters = hex_string(&alice_id.encode()); @@ -2830,6 +2914,9 @@ async fn check_continue_operation() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: 1, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -3012,6 +3099,9 @@ async fn stop_storage_operation() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: 1, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -3113,7 +3203,7 @@ async fn storage_closest_merkle_value() { /// The core of this test. /// - /// Checks keys that are exact match, keys with descedant and keys that should not return + /// Checks keys that are exact match, keys with descendant and keys that should not return /// values. /// /// Returns (key, merkle value) pairs. 
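The recurring hunks above all make the same mechanical change: every `ChainHeadConfig` literal in this test suite gains the two new limits. A minimal sketch of the resulting construction, reusing the constants and test helpers already defined in this file (`TestClientBuilder`-provided `client`/`backend`, `TaskExecutor`); the comments on the two new fields describe how the tests below exercise them and are illustrative, not documented guarantees:

```rust
let api = ChainHead::new(
	client.clone(),
	backend,
	Arc::new(TaskExecutor::default()),
	ChainHeadConfig {
		global_max_pinned_blocks: MAX_PINNED_BLOCKS,
		subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS),
		subscription_max_ongoing_operations: MAX_OPERATIONS,
		operation_max_storage_items: MAX_PAGINATION_LIMIT,
		// New in this change: once the gap between the last finalized block and
		// the newest imported block exceeds this distance, follow subscriptions
		// receive a `Stop` event (see `chain_head_stop_all_subscriptions` below).
		max_lagging_distance: MAX_LAGGING_DISTANCE,
		// New in this change: cap on concurrent chainHead_follow subscriptions
		// per RPC connection (see `chain_head_limit_reached` below).
		max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION,
	},
)
.into_rpc();
```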
@@ -3139,7 +3229,7 @@ async fn storage_closest_merkle_value() { key: hex_string(b":AAAB"), query_type: StorageQueryType::ClosestDescendantMerkleValue }, - // Key with descedent. + // Key with descendant. StorageQuery { key: hex_string(b":A"), query_type: StorageQueryType::ClosestDescendantMerkleValue @@ -3297,3 +3387,260 @@ async fn storage_closest_merkle_value() { merkle_values_rhs.get(&hex_string(b":AAAA")).unwrap() ); } + +#[tokio::test] +async fn chain_head_stop_all_subscriptions() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + // Configure the chainHead to stop all subscriptions on lagging distance of 5 blocks. + let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TaskExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_lagging_distance: 5, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + }, + ) + .into_rpc(); + + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + + // Ensure the imported block is propagated and pinned for this subscription. + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::Initialized(_) + ); + + // Import 6 blocks in total to trigger the suspension distance. + let mut parent_hash = client.chain_info().genesis_hash; + for i in 0..6 { + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .with_parent_block_number(i) + .build() + .unwrap() + .build() + .unwrap() + .block; + + let hash = block.hash(); + parent_hash = hash; + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + } + + let mut second_sub = + api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + // Lagging detected, the stop event is delivered immediately. + assert_matches!( + get_next_event::>(&mut second_sub).await, + FollowEvent::Stop + ); + + // Ensure that all subscriptions are stopped. + assert_matches!(get_next_event::>(&mut sub).await, FollowEvent::Stop); + + // Other subscriptions cannot be started until the suspension period is over. + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + // Should receive the stop event immediately. + assert_matches!(get_next_event::>(&mut sub).await, FollowEvent::Stop); + + // For the next subscription, lagging distance must be smaller. + client.finalize_block(parent_hash, None).unwrap(); + + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::Initialized(_) + ); +} + +#[tokio::test] +async fn chain_head_single_connection_context() { + let server_addr = run_server().await; + let server_url = format!("ws://{}", server_addr); + let client = jsonrpsee::ws_client::WsClientBuilder::default() + .build(&server_url) + .await + .unwrap(); + // Calls cannot be made from a different connection context. 
+ let second_client = jsonrpsee::ws_client::WsClientBuilder::default() + .build(&server_url) + .await + .unwrap(); + + let mut sub: RpcClientSubscription> = + ChainHeadApiClient::::chain_head_unstable_follow(&client, true) + .await + .unwrap(); + + let event = tokio::time::timeout(std::time::Duration::from_secs(60), sub.next()) + .await + .unwrap() + .unwrap() + .unwrap(); + let finalized_hash = match event { + FollowEvent::Initialized(init) => init.finalized_block_hashes.into_iter().last().unwrap(), + _ => panic!("Expected FollowEvent::Initialized"), + }; + + let first_sub_id = match sub.kind() { + jsonrpsee::core::client::SubscriptionKind::Subscription(id) => match id { + jsonrpsee::types::SubscriptionId::Num(num) => num.to_string(), + jsonrpsee::types::SubscriptionId::Str(s) => s.to_string(), + }, + _ => panic!("Unexpected subscription ID"), + }; + + // Trying to unpin from a different connection will have no effect. + let _response = ChainHeadApiClient::::chain_head_unstable_unpin( + &second_client, + first_sub_id.clone(), + crate::chain_head::api::ListOrValue::Value(finalized_hash.clone()), + ) + .await + .unwrap(); + + // Body can still be fetched from the first subscription. + let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_body( + &client, + first_sub_id.clone(), + finalized_hash.clone(), + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::Started(_started)); + + // Cannot make a call from a different connection context. + let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_body( + &second_client, + first_sub_id.clone(), + finalized_hash.clone(), + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::LimitReached); + + let response: Option = ChainHeadApiClient::::chain_head_unstable_header( + &client, + first_sub_id.clone(), + finalized_hash.clone(), + ) + .await + .unwrap(); + assert!(response.is_some()); + // Cannot make a call from a different connection context. + let response: Option = ChainHeadApiClient::::chain_head_unstable_header( + &second_client, + first_sub_id.clone(), + finalized_hash.clone(), + ) + .await + .unwrap(); + assert!(response.is_none()); + + let key = hex_string(&KEY); + let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_storage( + &client, + first_sub_id.clone(), + finalized_hash.clone(), + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }], + None, + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::Started(_started)); + // Cannot make a call from a different connection context. + let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_storage( + &second_client, + first_sub_id.clone(), + finalized_hash.clone(), + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }], + None, + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::LimitReached); + + let alice_id = AccountKeyring::Alice.to_account_id(); + // Hex encoded scale encoded bytes representing the call parameters. + let call_parameters = hex_string(&alice_id.encode()); + let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_call( + &client, + first_sub_id.clone(), + finalized_hash.clone(), + "AccountNonceApi_account_nonce".into(), + call_parameters.clone(), + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::Started(_started)); + // Cannot make a call from a different connection context. 
+ let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_call( + &second_client, + first_sub_id.clone(), + finalized_hash.clone(), + "AccountNonceApi_account_nonce".into(), + call_parameters.clone(), + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::LimitReached); +} + +#[tokio::test] +async fn chain_head_limit_reached() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let client = Arc::new(builder.build()); + + // Maximum of 1 chainHead_follow subscription. + let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TaskExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: 1, + }, + ) + .into_rpc(); + + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + + // Initialized must always be reported first. + let _event: FollowEvent = get_next_event(&mut sub).await; + + let error = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap_err(); + assert!(error + .to_string() + .contains("Maximum number of chainHead_follow has been reached")); + + // After dropping the subscription, other subscriptions are allowed to be created. + drop(sub); + // Ensure the `chainHead_unfollow` is propagated to the server. + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + // Initialized must always be reported first. + let _event: FollowEvent = get_next_event(&mut sub).await; +} diff --git a/substrate/client/rpc-spec-v2/src/common/connections.rs b/substrate/client/rpc-spec-v2/src/common/connections.rs new file mode 100644 index 000000000000..c16a80bf49db --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/common/connections.rs @@ -0,0 +1,262 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use jsonrpsee::ConnectionId; +use parking_lot::Mutex; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; + +/// Connection state which keeps track of whether a connection exists and +/// the number of concurrent operations. +#[derive(Default, Clone)] +pub struct RpcConnections { + /// The number of identifiers that can be registered for each connection. + /// + /// # Example + /// + /// This is used to limit how many `chainHead_follow` subscriptions are active at one time. + capacity: usize, + /// Map the connection ID to a set of identifiers.
+ data: Arc>>, +} + +#[derive(Default)] +struct ConnectionData { + /// The total number of identifiers for the given connection. + /// + /// An identifier for a connection might be: + /// - the subscription ID for chainHead_follow + /// - the operation ID for the transactionBroadcast API + /// - or simply how many times the transaction API has been called. + /// + /// # Note + /// + /// Because a pending subscription sink does not expose the future subscription ID, + /// we cannot register a subscription ID before the pending subscription is accepted. + /// This variable ensures that we have enough capacity to register an identifier, after + /// the subscription is accepted. Otherwise, a jsonrpc error object should be returned. + num_identifiers: usize, + /// Active registered identifiers for the given connection. + /// + /// # Note + /// + /// For chainHead, this represents the subscription ID. + /// For transactionBroadcast, this represents the operation ID. + /// For transaction, this is empty and the number of active calls is tracked by + /// [`Self::num_identifiers`]. + identifiers: HashSet, +} + +impl RpcConnections { + /// Constructs a new instance of [`RpcConnections`]. + pub fn new(capacity: usize) -> Self { + RpcConnections { capacity, data: Default::default() } + } + + /// Reserve space for a new connection identifier. + /// + /// If the number of active identifiers for the given connection has reached the capacity, + /// returns None. + pub fn reserve_space(&self, connection_id: ConnectionId) -> Option { + let mut data = self.data.lock(); + + let entry = data.entry(connection_id).or_insert_with(ConnectionData::default); + if entry.num_identifiers >= self.capacity { + return None; + } + entry.num_identifiers = entry.num_identifiers.saturating_add(1); + + Some(ReservedConnection { connection_id, rpc_connections: Some(self.clone()) }) + } + + /// Gives back the reserved space before the connection identifier is registered. + /// + /// # Note + /// + /// This may happen if the pending subscription cannot be accepted (unlikely). + fn unreserve_space(&self, connection_id: ConnectionId) { + let mut data = self.data.lock(); + + let entry = data.entry(connection_id).or_insert_with(ConnectionData::default); + entry.num_identifiers = entry.num_identifiers.saturating_sub(1); + + if entry.num_identifiers == 0 { + data.remove(&connection_id); + } + } + + /// Register an identifier for the given connection. + /// + /// Users must call [`Self::reserve_space`] before calling this method to ensure enough + /// space is available. + /// + /// Returns true if the identifier was inserted successfully, false if the identifier was + /// already inserted or reached capacity. + fn register_identifier(&self, connection_id: ConnectionId, identifier: String) -> bool { + let mut data = self.data.lock(); + + let entry = data.entry(connection_id).or_insert_with(ConnectionData::default); + // Should have already been checked in `Self::reserve_space`. + if entry.identifiers.len() >= self.capacity { + return false; + } + + entry.identifiers.insert(identifier) + } + + /// Unregister an identifier for the given connection.
+ fn unregister_identifier(&self, connection_id: ConnectionId, identifier: &str) { + let mut data = self.data.lock(); + if let Some(connection_data) = data.get_mut(&connection_id) { + connection_data.identifiers.remove(identifier); + connection_data.num_identifiers = connection_data.num_identifiers.saturating_sub(1); + + if connection_data.num_identifiers == 0 { + data.remove(&connection_id); + } + } + } + + /// Check if the given connection contains the given identifier. + pub fn contains_identifier(&self, connection_id: ConnectionId, identifier: &str) -> bool { + let data = self.data.lock(); + data.get(&connection_id) + .map(|connection_data| connection_data.identifiers.contains(identifier)) + .unwrap_or(false) + } +} + +/// RAII wrapper that ensures the reserved space is given back if the object is +/// dropped before the identifier is registered. +pub struct ReservedConnection { + connection_id: ConnectionId, + rpc_connections: Option, +} + +impl ReservedConnection { + /// Register the identifier for the given connection. + pub fn register(mut self, identifier: String) -> Option { + let rpc_connections = self.rpc_connections.take()?; + + if rpc_connections.register_identifier(self.connection_id, identifier.clone()) { + Some(RegisteredConnection { + connection_id: self.connection_id, + identifier, + rpc_connections, + }) + } else { + None + } + } +} + +impl Drop for ReservedConnection { + fn drop(&mut self) { + if let Some(rpc_connections) = self.rpc_connections.take() { + rpc_connections.unreserve_space(self.connection_id); + } + } +} + +/// RAII wrapper that ensures the identifier is unregistered if the object is dropped. +pub struct RegisteredConnection { + connection_id: ConnectionId, + identifier: String, + rpc_connections: RpcConnections, +} + +impl Drop for RegisteredConnection { + fn drop(&mut self) { + self.rpc_connections.unregister_identifier(self.connection_id, &self.identifier); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn reserve_space() { + let rpc_connections = RpcConnections::new(2); + let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_some()); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert_eq!(rpc_connections.data.lock().len(), 1); + + let reserved = reserved.unwrap(); + let registered = reserved.register("identifier1".to_string()).unwrap(); + assert!(rpc_connections.contains_identifier(1, "identifier1")); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + drop(registered); + + // Data is dropped. + assert!(rpc_connections.data.lock().get(&1).is_none()); + assert!(rpc_connections.data.lock().is_empty()); + // Checks can still happen. + assert!(!rpc_connections.contains_identifier(1, "identifier1")); + } + + #[test] + fn reserve_space_capacity_reached() { + let rpc_connections = RpcConnections::new(2); + + // Reserve identifier for connection 1. + let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_some()); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Add identifier for connection 1. + let reserved = reserved.unwrap(); + let registered = reserved.register("identifier1".to_string()).unwrap(); + assert!(rpc_connections.contains_identifier(1, "identifier1")); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Reserve identifier for connection 1 again. 
+ let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_some()); + assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Add identifier for connection 1 again. + let reserved = reserved.unwrap(); + let registered_second = reserved.register("identifier2".to_string()).unwrap(); + assert!(rpc_connections.contains_identifier(1, "identifier2")); + assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Cannot reserve more identifiers. + let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_none()); + + // Drop the first identifier. + drop(registered); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert!(rpc_connections.contains_identifier(1, "identifier2")); + assert!(!rpc_connections.contains_identifier(1, "identifier1")); + + // Can reserve again after clearing the space. + let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_some()); + assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Ensure data is cleared. + drop(reserved); + drop(registered_second); + assert!(rpc_connections.data.lock().get(&1).is_none()); + } +} diff --git a/substrate/client/rpc-spec-v2/src/common/mod.rs b/substrate/client/rpc-spec-v2/src/common/mod.rs index ac1af8fce3c9..3167561d649a 100644 --- a/substrate/client/rpc-spec-v2/src/common/mod.rs +++ b/substrate/client/rpc-spec-v2/src/common/mod.rs @@ -13,5 +13,6 @@ //! Common types and functionality for the RPC-V2 spec. +pub mod connections; pub mod events; pub mod storage; diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs index 690a1a64d746..77a28968aedf 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs @@ -372,7 +372,7 @@ async fn tx_broadcast_resubmits_invalid_tx() { client_mock.trigger_import_stream(block_2_header).await; // Ensure we propagate the temporary ban error to `submit_and_watch`. - // This ensures we'll loop again with the next annmounced block and try to resubmit the + // This ensures we'll loop again with the next announced block and try to resubmit the // transaction. The transaction remains temporarily banned until the pool is maintained. let event = get_next_event!(&mut pool_middleware); assert_matches!(event, MiddlewarePoolEvent::PoolError { transaction, err } if transaction == xt && err.contains("Transaction temporarily Banned")); @@ -429,7 +429,7 @@ async fn tx_broadcast_resubmits_invalid_tx() { } /// This is similar to `tx_broadcast_resubmits_invalid_tx`. -/// However, it forces the tx to be resubmited because of the pool +/// However, it forces the tx to be resubmitted because of the pool /// limits. Which is a different code path than the invalid tx. #[tokio::test] async fn tx_broadcast_resubmits_dropped_tx() { @@ -509,7 +509,7 @@ async fn tx_broadcast_resubmits_dropped_tx() { pool.inner_pool.maintain(event).await; client_mock.trigger_import_stream(block_3_header.clone()).await; - // The first tx is in a finalzied block; the future tx must enter the pool. + // The first tx is in a finalized block; the future tx must enter the pool. 
let events = get_next_tx_events!(&mut pool_middleware, 3); assert_eq!( events.get(¤t_xt).unwrap(), diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs index 92c838261874..6eaf50d6b2e2 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs @@ -44,7 +44,7 @@ pub struct TransactionBroadcast { pool: Arc, /// Executor to spawn subscriptions. executor: SubscriptionTaskExecutor, - /// The brodcast operation IDs. + /// The broadcast operation IDs. broadcast_ids: Arc>>, } @@ -200,7 +200,7 @@ where } } -/// Returns the last element of the providided stream, or `None` if the stream is closed. +/// Returns the last element of the provided stream, or `None` if the stream is closed. async fn last_stream_element(stream: &mut S) -> Option where S: Stream + Unpin, diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml index f65e6c9a59ec..dff34215b025 100644 --- a/substrate/client/rpc/Cargo.toml +++ b/substrate/client/rpc/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" jsonrpsee = { version = "0.22", features = ["server"] } log = { workspace = true, default-features = true } parking_lot = "0.12.1" @@ -40,10 +40,10 @@ sp-runtime = { path = "../../primitives/runtime" } sp-session = { path = "../../primitives/session" } sp-version = { path = "../../primitives/version" } sp-statement-store = { path = "../../primitives/statement-store" } -tokio = "1.22.0" +tokio = "1.37" [dev-dependencies] -env_logger = "0.9" +env_logger = "0.11" assert_matches = "1.3.0" sc-block-builder = { path = "../block-builder" } sc-network = { path = "../network" } @@ -51,7 +51,7 @@ sc-network-common = { path = "../network/common" } sc-transaction-pool = { path = "../transaction-pool" } sp-consensus = { path = "../../primitives/consensus/common" } sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -tokio = "1.22.0" +tokio = "1.37" sp-io = { path = "../../primitives/io" } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } pretty_assertions = "1.2.1" diff --git a/substrate/client/rpc/src/statement/mod.rs b/substrate/client/rpc/src/statement/mod.rs index b4f432bbbb0e..e99135aec38c 100644 --- a/substrate/client/rpc/src/statement/mod.rs +++ b/substrate/client/rpc/src/statement/mod.rs @@ -89,7 +89,7 @@ impl StatementApiServer for StatementStore { fn submit(&self, encoded: Bytes) -> RpcResult<()> { let statement = Decode::decode(&mut &*encoded) - .map_err(|e| Error::StatementStore(format!("Eror decoding statement: {:?}", e)))?; + .map_err(|e| Error::StatementStore(format!("Error decoding statement: {:?}", e)))?; match self.store.submit(statement, StatementSource::Local) { SubmitResult::New(_) | SubmitResult::Known => Ok(()), // `KnownExpired` should not happen. 
Expired statements submitted with diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index bbf67d1fbd0a..b81f2e2f55ab 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -30,7 +30,7 @@ runtime-benchmarks = [ [dependencies] jsonrpsee = { version = "0.22", features = ["server"] } thiserror = { workspace = true } -futures = "0.3.21" +futures = "0.3.30" rand = "0.8.5" parking_lot = "0.12.1" log = { workspace = true, default-features = true } @@ -79,7 +79,7 @@ sc-tracing = { path = "../tracing" } sc-sysinfo = { path = "../sysinfo" } tracing = "0.1.29" tracing-futures = { version = "0.2.4" } -async-trait = "0.1.74" +async-trait = "0.1.79" tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "time"] } tempfile = "3.1.0" directories = "5.0.1" diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 31d63c6a81d3..e71313428daf 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -64,7 +64,9 @@ use sc_rpc::{ DenyUnsafe, SubscriptionTaskExecutor, }; use sc_rpc_spec_v2::{ - archive::ArchiveApiServer, chain_head::ChainHeadApiServer, transaction::TransactionApiServer, + archive::ArchiveApiServer, + chain_head::ChainHeadApiServer, + transaction::{TransactionApiServer, TransactionBroadcastApiServer}, }; use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool}; @@ -653,6 +655,13 @@ where (chain, state, child_state) }; + let transaction_broadcast_rpc_v2 = sc_rpc_spec_v2::transaction::TransactionBroadcast::new( + client.clone(), + transaction_pool.clone(), + task_executor.clone(), + ) + .into_rpc(); + let transaction_v2 = sc_rpc_spec_v2::transaction::Transaction::new( client.clone(), transaction_pool.clone(), @@ -708,6 +717,9 @@ where // Part of the RPC v2 spec. rpc_api.merge(transaction_v2).map_err(|e| Error::Application(e.into()))?; + rpc_api + .merge(transaction_broadcast_rpc_v2) + .map_err(|e| Error::Application(e.into()))?; rpc_api.merge(chain_head_v2).map_err(|e| Error::Application(e.into()))?; // Part of the old RPC spec. diff --git a/substrate/client/service/src/chain_ops/import_blocks.rs b/substrate/client/service/src/chain_ops/import_blocks.rs index 34f7669d0106..661fc09a8f19 100644 --- a/substrate/client/service/src/chain_ops/import_blocks.rs +++ b/substrate/client/service/src/chain_ops/import_blocks.rs @@ -73,7 +73,7 @@ where reader: CodecIoReader, }, Json { - // Nubmer of blocks we have decoded thus far. + // Number of blocks we have decoded thus far. read_block_count: u64, // Stream to the data, used for decoding new blocks. reader: StreamDeserializer<'static, JsonIoRead, SignedBlock>, diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs index 35262ff493b4..59e307d7f93b 100644 --- a/substrate/client/service/src/config.rs +++ b/substrate/client/service/src/config.rs @@ -236,7 +236,7 @@ impl Configuration { ProtocolId::from(protocol_id_full) } - /// Returns true if the genesis state writting will be skipped while initializing the genesis + /// Returns true if the genesis state writing will be skipped while initializing the genesis /// block. pub fn no_genesis(&self) -> bool { matches!(self.network.sync_mode, SyncMode::LightState { .. } | SyncMode::Warp { .. 
}) diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index ee7e60f60117..2de984689bad 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] async-channel = "1.8.0" array-bytes = "6.1" fdlimit = "0.3.0" -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } parity-scale-codec = "3.6.1" parking_lot = "0.12.1" diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index ba83bec8276e..51dcc4966e58 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -2226,11 +2226,11 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi #[test] fn use_dalek_ext_works() { fn zero_ed_pub() -> sp_core::ed25519::Public { - sp_core::ed25519::Public([0u8; 32]) + sp_core::ed25519::Public::default() } fn zero_ed_sig() -> sp_core::ed25519::Signature { - sp_core::ed25519::Signature::from_raw([0u8; 64]) + sp_core::ed25519::Signature::default() } let mut client = TestClientBuilder::new().build(); diff --git a/substrate/client/state-db/src/lib.rs b/substrate/client/state-db/src/lib.rs index 41c231c31aaf..6c37c87a611b 100644 --- a/substrate/client/state-db/src/lib.rs +++ b/substrate/client/state-db/src/lib.rs @@ -872,7 +872,7 @@ mod tests { fn check_stored_and_requested_mode_compatibility( mode_when_created: Option, mode_when_reopened: Option, - expected_effective_mode_when_reopenned: Result, + expected_effective_mode_when_reopened: Result, ) { let mut db = make_db(&[]); let (state_db_init, state_db) = @@ -883,7 +883,7 @@ mod tests { let state_db_reopen_result = StateDb::::open(db.clone(), mode_when_reopened, false, false); - if let Ok(expected_mode) = expected_effective_mode_when_reopenned { + if let Ok(expected_mode) = expected_effective_mode_when_reopened { let (state_db_init, state_db_reopened) = state_db_reopen_result.unwrap(); db.commit(&state_db_init); assert_eq!(state_db_reopened.pruning_mode(), expected_mode,) diff --git a/substrate/client/state-db/src/noncanonical.rs b/substrate/client/state-db/src/noncanonical.rs index bdbe8318371c..4492a7bd077d 100644 --- a/substrate/client/state-db/src/noncanonical.rs +++ b/substrate/client/state-db/src/noncanonical.rs @@ -46,26 +46,26 @@ pub struct NonCanonicalOverlay { #[cfg_attr(test, derive(PartialEq, Debug))] struct OverlayLevel { blocks: Vec>, - used_indicies: u64, // Bitmask of available journal indicies. + used_indices: u64, // Bitmask of available journal indices. 
} impl OverlayLevel { fn push(&mut self, overlay: BlockOverlay) { - self.used_indicies |= 1 << overlay.journal_index; + self.used_indices |= 1 << overlay.journal_index; self.blocks.push(overlay) } fn available_index(&self) -> u64 { - self.used_indicies.trailing_ones() as u64 + self.used_indices.trailing_ones() as u64 } fn remove(&mut self, index: usize) -> BlockOverlay { - self.used_indicies &= !(1 << self.blocks[index].journal_index); + self.used_indices &= !(1 << self.blocks[index].journal_index); self.blocks.remove(index) } fn new() -> OverlayLevel { - OverlayLevel { blocks: Vec::new(), used_indicies: 0 } + OverlayLevel { blocks: Vec::new(), used_indices: 0 } } } diff --git a/substrate/client/statement-store/Cargo.toml b/substrate/client/statement-store/Cargo.toml index 676f6cb36f67..8ca6d11dbe0d 100644 --- a/substrate/client/statement-store/Cargo.toml +++ b/substrate/client/statement-store/Cargo.toml @@ -31,4 +31,4 @@ sc-keystore = { path = "../keystore" } [dev-dependencies] tempfile = "3.1.0" -env_logger = "0.9" +env_logger = "0.11" diff --git a/substrate/client/sysinfo/Cargo.toml b/substrate/client/sysinfo/Cargo.toml index f85eb11bde84..63ed1b925970 100644 --- a/substrate/client/sysinfo/Cargo.toml +++ b/substrate/client/sysinfo/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.19" +futures = "0.3.30" libc = "0.2" log = { workspace = true, default-features = true } rand = "0.8.5" diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml index 8ab00202f0ba..9a29a33a591f 100644 --- a/substrate/client/telemetry/Cargo.toml +++ b/substrate/client/telemetry/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] chrono = "0.4.31" -futures = "0.3.21" +futures = "0.3.30" libp2p = { version = "0.51.4", features = ["dns", "tcp", "tokio", "wasm-ext", "websocket"] } log = { workspace = true, default-features = true } parking_lot = "0.12.1" diff --git a/substrate/client/telemetry/src/lib.rs b/substrate/client/telemetry/src/lib.rs index 113d8303a20f..7e3a4ee86393 100644 --- a/substrate/client/telemetry/src/lib.rs +++ b/substrate/client/telemetry/src/lib.rs @@ -413,7 +413,7 @@ impl Telemetry { .map_err(|_| Error::TelemetryWorkerDropped) } - /// Make a new cloneable handle to this [`Telemetry`]. This is used for reporting telemetries. + /// Make a new clonable handle to this [`Telemetry`]. This is used for reporting telemetries. pub fn handle(&self) -> TelemetryHandle { TelemetryHandle { message_sender: Arc::new(Mutex::new(self.message_sender.clone())), diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml index 2ca37afd61b8..e2a0b87eaabb 100644 --- a/substrate/client/transaction-pool/Cargo.toml +++ b/substrate/client/transaction-pool/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" linked-hash-map = "0.5.4" log = { workspace = true, default-features = true } diff --git a/substrate/client/transaction-pool/README.md b/substrate/client/transaction-pool/README.md index b55dc6482d64..7a53727d5761 100644 --- a/substrate/client/transaction-pool/README.md +++ b/substrate/client/transaction-pool/README.md @@ -171,7 +171,7 @@ This parameter instructs the pool propagate/gossip a transaction to node peers. 
By default this should be `true`, however in some cases it might be undesirable to propagate transactions further. Examples might include heavy transactions produced by block authors in offchain workers (DoS) or risking being front -runned by someone else after finding some non trivial solution or equivocation, +run by someone else after finding some non trivial solution or equivocation, etc. ### 'TransactionSource` diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml index d52e4783fabc..1bb72ef55442 100644 --- a/substrate/client/transaction-pool/api/Cargo.toml +++ b/substrate/client/transaction-pool/api/Cargo.toml @@ -12,9 +12,9 @@ description = "Transaction pool client facing API." workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index 461b9860d414..49bd2203c12b 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -999,7 +999,7 @@ fn import_notification_to_pool_maintain_works() { .0, ); - // Prepare the extrisic, push it to the pool and check that it was added. + // Prepare the extrinsic, push it to the pool and check that it was added. let xt = uxt(Alice, 0); block_on(pool.submit_one( pool.api().block_id_to_hash(&BlockId::Number(0)).unwrap().unwrap(), diff --git a/substrate/client/utils/Cargo.toml b/substrate/client/utils/Cargo.toml index 7f604219bc09..a101f4b3f3ad 100644 --- a/substrate/client/utils/Cargo.toml +++ b/substrate/client/utils/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] async-channel = "1.8.0" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" lazy_static = "1.4.0" log = { workspace = true, default-features = true } diff --git a/substrate/client/utils/src/mpsc.rs b/substrate/client/utils/src/mpsc.rs index 7ce25ca7fb4f..b3c2aa59ff2d 100644 --- a/substrate/client/utils/src/mpsc.rs +++ b/substrate/client/utils/src/mpsc.rs @@ -86,7 +86,7 @@ pub fn tracing_unbounded( warning_fired: Arc::new(AtomicBool::new(false)), creation_backtrace: Arc::new(Backtrace::force_capture()), }; - let receiver = TracingUnboundedReceiver { inner: r, name }; + let receiver = TracingUnboundedReceiver { inner: r, name: name.into() }; (sender, receiver) } @@ -157,6 +157,11 @@ impl TracingUnboundedReceiver { pub fn len(&self) -> usize { self.inner.len() } + + /// The name of this receiver. + pub fn name(&self) -> &'static str { + self.name + } } impl Drop for TracingUnboundedReceiver { diff --git a/substrate/client/utils/src/notification.rs b/substrate/client/utils/src/notification.rs index dabb85d613cc..3f606aabe3a1 100644 --- a/substrate/client/utils/src/notification.rs +++ b/substrate/client/utils/src/notification.rs @@ -44,7 +44,7 @@ mod tests; /// and identify the mpsc channels. pub trait TracingKeyStr { /// Const `str` representing the "tracing key" used to tag and identify - /// the mpsc channels owned by the object implemeting this trait. + /// the mpsc channels owned by the object implementing this trait.
const TRACING_KEY: &'static str; } diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 7b17ee1d9304..580e12849bc8 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "frame" -version = "0.0.1-dev" +name = "polkadot-sdk-frame" +version = "0.1.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -22,7 +22,7 @@ targets = ["x86_64-unknown-linux-gnu"] parity-scale-codec = { version = "3.2.2", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.6.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } @@ -50,7 +50,7 @@ sp-inherents = { default-features = false, path = "../primitives/inherents", opt frame-executive = { default-features = false, path = "../frame/executive", optional = true } frame-system-rpc-runtime-api = { default-features = false, path = "../frame/system/rpc/runtime-api", optional = true } -docify = "0.2.7" +docify = "0.2.8" log = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/alliance/Cargo.toml b/substrate/frame/alliance/Cargo.toml index ce35a374a1f9..b237be4d6568 100644 --- a/substrate/frame/alliance/Cargo.toml +++ b/substrate/frame/alliance/Cargo.toml @@ -20,7 +20,7 @@ array-bytes = { version = "6.1", optional = true } log = { workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-core = { path = "../../primitives/core", default-features = false } sp-crypto-hashing = { path = "../../primitives/crypto/hashing", default-features = false, optional = true } diff --git a/substrate/frame/alliance/README.md b/substrate/frame/alliance/README.md index 9930008e2d63..16335a98f59f 100644 --- a/substrate/frame/alliance/README.md +++ b/substrate/frame/alliance/README.md @@ -58,7 +58,7 @@ to update the Alliance's rule and make announcements. - `add_unscrupulous_items` - Add some items, either accounts or websites, to the list of unscrupulous items. - `remove_unscrupulous_items` - Remove some items from the list of unscrupulous items. -- `abdicate_fellow_status` - Abdicate one's voting rights, demoting themself to Ally. +- `abdicate_fellow_status` - Abdicate one's voting rights, demoting themselves to Ally. 
#### Root Calls diff --git a/substrate/frame/alliance/src/benchmarking.rs b/substrate/frame/alliance/src/benchmarking.rs index b4a78a426491..972241a5a69f 100644 --- a/substrate/frame/alliance/src/benchmarking.rs +++ b/substrate/frame/alliance/src/benchmarking.rs @@ -504,8 +504,8 @@ mod benchmarks { assert_last_event::( Event::MembersInitialized { fellows: fellows.clone(), allies: allies.clone() }.into(), ); - assert_eq!(Alliance::::members(MemberRole::Fellow), fellows); - assert_eq!(Alliance::::members(MemberRole::Ally), allies); + assert_eq!(Members::::get(MemberRole::Fellow), fellows); + assert_eq!(Members::::get(MemberRole::Ally), allies); Ok(()) } @@ -562,7 +562,7 @@ mod benchmarks { { call.dispatch_bypass_filter(origin)?; } - assert_eq!(Alliance::::rule(), Some(rule.clone())); + assert_eq!(Rule::::get(), Some(rule.clone())); assert_last_event::(Event::NewRuleSet { rule }.into()); Ok(()) } @@ -582,7 +582,7 @@ mod benchmarks { call.dispatch_bypass_filter(origin)?; } - assert!(Alliance::::announcements().contains(&announcement)); + assert!(Announcements::::get().contains(&announcement)); assert_last_event::(Event::Announced { announcement }.into()); Ok(()) } @@ -605,7 +605,7 @@ mod benchmarks { call.dispatch_bypass_filter(origin)?; } - assert!(!Alliance::::announcements().contains(&announcement)); + assert!(!Announcements::::get().contains(&announcement)); assert_last_event::(Event::AnnouncementRemoved { announcement }.into()); Ok(()) } diff --git a/substrate/frame/alliance/src/lib.rs b/substrate/frame/alliance/src/lib.rs index d6b210c3b00a..1fa4b93b9852 100644 --- a/substrate/frame/alliance/src/lib.rs +++ b/substrate/frame/alliance/src/lib.rs @@ -445,24 +445,20 @@ pub mod pallet { /// The IPFS CID of the alliance rule. /// Fellows can propose a new rule with a super-majority. #[pallet::storage] - #[pallet::getter(fn rule)] pub type Rule, I: 'static = ()> = StorageValue<_, Cid, OptionQuery>; /// The current IPFS CIDs of any announcements. #[pallet::storage] - #[pallet::getter(fn announcements)] pub type Announcements, I: 'static = ()> = StorageValue<_, BoundedVec, ValueQuery>; /// Maps members to their candidacy deposit. #[pallet::storage] - #[pallet::getter(fn deposit_of)] pub type DepositOf, I: 'static = ()> = StorageMap<_, Blake2_128Concat, T::AccountId, BalanceOf, OptionQuery>; /// Maps member type to members of each type. #[pallet::storage] - #[pallet::getter(fn members)] pub type Members, I: 'static = ()> = StorageMap< _, Twox64Concat, @@ -474,20 +470,17 @@ pub mod pallet { /// A set of members who gave a retirement notice. They can retire after the end of retirement /// period stored as a future block number. #[pallet::storage] - #[pallet::getter(fn retiring_members)] pub type RetiringMembers, I: 'static = ()> = StorageMap<_, Blake2_128Concat, T::AccountId, BlockNumberFor, OptionQuery>; /// The current list of accounts deemed unscrupulous. These accounts non grata cannot submit /// candidacy. #[pallet::storage] - #[pallet::getter(fn unscrupulous_accounts)] pub type UnscrupulousAccounts, I: 'static = ()> = StorageValue<_, BoundedVec, ValueQuery>; /// The current list of websites deemed unscrupulous. 
#[pallet::storage] - #[pallet::getter(fn unscrupulous_websites)] pub type UnscrupulousWebsites, I: 'static = ()> = StorageValue<_, BoundedVec, T::MaxUnscrupulousItems>, ValueQuery>; @@ -508,10 +501,10 @@ pub mod pallet { proposal: Box<>::Proposal>, #[pallet::compact] length_bound: u32, ) -> DispatchResult { - let proposor = ensure_signed(origin)?; - ensure!(Self::has_voting_rights(&proposor), Error::::NoVotingRights); + let proposer = ensure_signed(origin)?; + ensure!(Self::has_voting_rights(&proposer), Error::::NoVotingRights); - T::ProposalProvider::propose_proposal(proposor, threshold, proposal, length_bound)?; + T::ProposalProvider::propose_proposal(proposer, threshold, proposal, length_bound)?; Ok(()) } diff --git a/substrate/frame/alliance/src/migration.rs b/substrate/frame/alliance/src/migration.rs index 432f09a16f47..b4ecc1819447 100644 --- a/substrate/frame/alliance/src/migration.rs +++ b/substrate/frame/alliance/src/migration.rs @@ -162,18 +162,18 @@ pub(crate) mod v1_to_v2 { #[cfg(test)] mod test { use super::*; - use crate::{mock::*, MemberRole}; + use crate::{mock::*, MemberRole, Members}; #[test] fn migration_v1_to_v2_works() { new_test_ext().execute_with(|| { assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(4))); - assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(Members::::get(MemberRole::Ally), vec![4]); + assert_eq!(Members::::get(MemberRole::Fellow), vec![1, 2, 3]); v1_to_v2::migrate::(); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3, 4]); - assert_eq!(Alliance::members(MemberRole::Ally), vec![]); - assert_eq!(Alliance::members(MemberRole::Retiring), vec![]); + assert_eq!(Members::::get(MemberRole::Fellow), vec![1, 2, 3, 4]); + assert_eq!(Members::::get(MemberRole::Ally), vec![]); + assert_eq!(Members::::get(MemberRole::Retiring), vec![]); }); } } diff --git a/substrate/frame/alliance/src/tests.rs b/substrate/frame/alliance/src/tests.rs index 710de5a54bc9..edb515b8115a 100644 --- a/substrate/frame/alliance/src/tests.rs +++ b/substrate/frame/alliance/src/tests.rs @@ -21,12 +21,12 @@ use frame_support::{assert_noop, assert_ok, error::BadOrigin}; use frame_system::{EventRecord, Phase}; use super::*; -use crate::mock::*; +use crate::{self as alliance, mock::*}; type AllianceMotionEvent = pallet_collective::Event; fn assert_powerless(user: RuntimeOrigin, user_is_member: bool) { - //vote / veto with a valid propsal + //vote / veto with a valid proposal let cid = test_cid(); let (proposal, _, _) = make_kick_member_proposal(42); @@ -118,7 +118,7 @@ fn disband_works() { // join alliance and reserve funds assert_eq!(Balances::free_balance(9), 1000 - id_deposit); assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(9))); - assert_eq!(Alliance::deposit_of(9), Some(expected_join_deposit)); + assert_eq!(alliance::DepositOf::::get(9), Some(expected_join_deposit)); assert_eq!(Balances::free_balance(9), 1000 - id_deposit - expected_join_deposit); assert!(Alliance::is_member_of(&9, MemberRole::Ally)); @@ -314,7 +314,7 @@ fn set_rule_works() { new_test_ext().execute_with(|| { let cid = test_cid(); assert_ok!(Alliance::set_rule(RuntimeOrigin::signed(1), cid.clone())); - assert_eq!(Alliance::rule(), Some(cid.clone())); + assert_eq!(alliance::Rule::::get(), Some(cid.clone())); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::NewRuleSet { rule: cid, @@ -330,7 +330,7 @@ fn announce_works() { assert_noop!(Alliance::announce(RuntimeOrigin::signed(2), 
cid.clone()), BadOrigin); assert_ok!(Alliance::announce(RuntimeOrigin::signed(3), cid.clone())); - assert_eq!(Alliance::announcements(), vec![cid.clone()]); + assert_eq!(alliance::Announcements::::get(), vec![cid.clone()]); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::Announced { announcement: cid, @@ -343,7 +343,7 @@ fn remove_announcement_works() { new_test_ext().execute_with(|| { let cid = test_cid(); assert_ok!(Alliance::announce(RuntimeOrigin::signed(3), cid.clone())); - assert_eq!(Alliance::announcements(), vec![cid.clone()]); + assert_eq!(alliance::Announcements::::get(), vec![cid.clone()]); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::Announced { announcement: cid.clone(), })); @@ -351,7 +351,7 @@ fn remove_announcement_works() { System::set_block_number(2); assert_ok!(Alliance::remove_announcement(RuntimeOrigin::signed(3), cid.clone())); - assert_eq!(Alliance::announcements(), vec![]); + assert_eq!(alliance::Announcements::::get(), vec![]); System::assert_last_event(mock::RuntimeEvent::Alliance( crate::Event::AnnouncementRemoved { announcement: cid }, )); @@ -394,8 +394,8 @@ fn join_alliance_works() { // success to submit assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(4))); assert_eq!(Balances::free_balance(4), 1000 - id_deposit - join_deposit); - assert_eq!(Alliance::deposit_of(4), Some(25)); - assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); + assert_eq!(alliance::DepositOf::::get(4), Some(25)); + assert_eq!(alliance::Members::::get(MemberRole::Ally), vec![4]); // check already member assert_noop!( @@ -449,8 +449,8 @@ fn nominate_ally_works() { // success to nominate assert_ok!(Alliance::nominate_ally(RuntimeOrigin::signed(1), 4)); - assert_eq!(Alliance::deposit_of(4), None); - assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); + assert_eq!(alliance::DepositOf::::get(4), None); + assert_eq!(alliance::Members::::get(MemberRole::Ally), vec![4]); // check already member assert_noop!( @@ -482,12 +482,12 @@ fn elevate_ally_works() { ); assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(4))); - assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Ally), vec![4]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3]); assert_ok!(Alliance::elevate_ally(RuntimeOrigin::signed(2), 4)); - assert_eq!(Alliance::members(MemberRole::Ally), Vec::::new()); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3, 4]); + assert_eq!(alliance::Members::::get(MemberRole::Ally), Vec::::new()); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3, 4]); }); } @@ -499,10 +499,10 @@ fn give_retirement_notice_work() { Error::::NotMember ); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3]); assert_ok!(Alliance::give_retirement_notice(RuntimeOrigin::signed(3))); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2]); - assert_eq!(Alliance::members(MemberRole::Retiring), vec![3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2]); + assert_eq!(alliance::Members::::get(MemberRole::Retiring), vec![3]); System::assert_last_event(mock::RuntimeEvent::Alliance( crate::Event::MemberRetirementPeriodStarted { member: (3) }, )); @@ -527,7 +527,7 @@ fn retire_works() { Error::::RetirementNoticeNotGiven ); - 
assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3]); assert_ok!(Alliance::give_retirement_notice(RuntimeOrigin::signed(3))); assert_noop!( Alliance::retire(RuntimeOrigin::signed(3)), @@ -535,7 +535,7 @@ fn retire_works() { ); System::set_block_number(System::block_number() + RetirementPeriod::get()); assert_ok!(Alliance::retire(RuntimeOrigin::signed(3))); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2]); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::MemberRetired { member: (3), unreserved: None, @@ -551,7 +551,7 @@ fn retire_works() { #[test] fn abdicate_works() { new_test_ext().execute_with(|| { - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3]); assert_ok!(Alliance::abdicate_fellow_status(RuntimeOrigin::signed(3))); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::FellowAbdicated { @@ -573,9 +573,9 @@ fn kick_member_works() { ); >::insert(2, 25); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3]); assert_ok!(Alliance::kick_member(RuntimeOrigin::signed(2), 2)); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 3]); assert_eq!(>::get(2), None); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::MemberKicked { member: (2), @@ -596,8 +596,11 @@ fn add_unscrupulous_items_works() { UnscrupulousItem::Website("abc".as_bytes().to_vec().try_into().unwrap()) ] )); - assert_eq!(Alliance::unscrupulous_accounts().into_inner(), vec![3]); - assert_eq!(Alliance::unscrupulous_websites().into_inner(), vec!["abc".as_bytes().to_vec()]); + assert_eq!(alliance::UnscrupulousAccounts::::get().into_inner(), vec![3]); + assert_eq!( + alliance::UnscrupulousWebsites::::get().into_inner(), + vec!["abc".as_bytes().to_vec()] + ); assert_noop!( Alliance::add_unscrupulous_items( @@ -629,12 +632,12 @@ fn remove_unscrupulous_items_works() { RuntimeOrigin::signed(3), vec![UnscrupulousItem::AccountId(3)] )); - assert_eq!(Alliance::unscrupulous_accounts(), vec![3]); + assert_eq!(alliance::UnscrupulousAccounts::::get(), vec![3]); assert_ok!(Alliance::remove_unscrupulous_items( RuntimeOrigin::signed(3), vec![UnscrupulousItem::AccountId(3)] )); - assert_eq!(Alliance::unscrupulous_accounts(), Vec::::new()); + assert_eq!(alliance::UnscrupulousAccounts::::get(), Vec::::new()); }); } diff --git a/substrate/frame/asset-conversion/Cargo.toml b/substrate/frame/asset-conversion/Cargo.toml index adf116bf761b..3824433bf599 100644 --- a/substrate/frame/asset-conversion/Cargo.toml +++ b/substrate/frame/asset-conversion/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../../primitives/api", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } 
sp-io = { path = "../../primitives/io", default-features = false } diff --git a/substrate/frame/asset-conversion/src/lib.rs b/substrate/frame/asset-conversion/src/lib.rs index 456d1b808c73..ced33a83747d 100644 --- a/substrate/frame/asset-conversion/src/lib.rs +++ b/substrate/frame/asset-conversion/src/lib.rs @@ -207,7 +207,7 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// A successful call of the `CretaPool` extrinsic will create this event. + /// A successful call of the `CreatePool` extrinsic will create this event. PoolCreated { /// The account that created the pool. creator: T::AccountId, diff --git a/substrate/frame/asset-rate/Cargo.toml b/substrate/frame/asset-rate/Cargo.toml index 59bd24616dbb..5f2625cae6a2 100644 --- a/substrate/frame/asset-rate/Cargo.toml +++ b/substrate/frame/asset-rate/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/asset-rate/src/lib.rs b/substrate/frame/asset-rate/src/lib.rs index 8705e1fc5e70..cfb013a73f5e 100644 --- a/substrate/frame/asset-rate/src/lib.rs +++ b/substrate/frame/asset-rate/src/lib.rs @@ -114,7 +114,7 @@ pub mod pallet { /// The origin permissioned to remove an existing conversion rate for an asset. type RemoveOrigin: EnsureOrigin; - /// The origin permissioned to update an existiing conversion rate for an asset. + /// The origin permissioned to update an existing conversion rate for an asset. type UpdateOrigin: EnsureOrigin; /// The currency mechanism for this pallet. diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml index f7802bc3742b..17f676673d5d 100644 --- a/substrate/frame/assets/Cargo.toml +++ b/substrate/frame/assets/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Needed for various traits. In our case, `OnFinalize`. sp-runtime = { path = "../../primitives/runtime", default-features = false } # Needed for type-safe access to storage DB. diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index ca429f3557a0..88b4477386ac 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -17,7 +17,16 @@ //! # Assets Pallet //! -//! A simple, secure module for dealing with fungible assets. +//! A simple, secure module for dealing with sets of assets implementing +//! [`fungible`](frame_support::traits::fungible) traits, via +//! [`fungibles`](frame_support::traits::fungibles) traits. +//! +//! The pallet makes heavy use of concepts such as Holds and Freezes from the +//! [`frame_support::traits::fungible`] traits, therefore you should read and understand those docs +//! as a prerequisite to understanding this pallet. +//! 
+//! See the [`frame_tokens`] reference docs for more information about the place of the +//! Assets pallet in FRAME. //! //! ## Overview //! @@ -133,6 +142,8 @@ //! //! * [`System`](../frame_system/index.html) //! * [`Support`](../frame_support/index.html) +//! +//! [`frame_tokens`]: ../polkadot_sdk_docs/reference_docs/frame_tokens/index.html // This recursion limit is needed because we have too many benchmarks and benchmarking will fail if // we add more without this limit. @@ -184,7 +195,7 @@ pub use weights::WeightInfo; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; const LOG_TARGET: &str = "runtime::assets"; -/// Trait with callbacks that are executed after successfull asset creation or destruction. +/// Trait with callbacks that are executed after successful asset creation or destruction. pub trait AssetsCallback { /// Indicates that asset with `id` was successfully created by the `owner` fn created(_id: &AssetId, _owner: &AccountId) -> Result<(), ()> { diff --git a/substrate/frame/assets/src/tests.rs b/substrate/frame/assets/src/tests.rs index e09648a51ecc..c7021bcad531 100644 --- a/substrate/frame/assets/src/tests.rs +++ b/substrate/frame/assets/src/tests.rs @@ -1453,7 +1453,7 @@ fn force_asset_status_should_work() { )); assert_eq!(Assets::balance(0, 1), 50); - // account can recieve assets for balance < min_balance + // account can receive assets for balance < min_balance assert_ok!(Assets::transfer(RuntimeOrigin::signed(2), 0, 1, 1)); assert_eq!(Assets::balance(0, 1), 51); diff --git a/substrate/frame/atomic-swap/Cargo.toml b/substrate/frame/atomic-swap/Cargo.toml index 57ec443c5f91..fcae79530f77 100644 --- a/substrate/frame/atomic-swap/Cargo.toml +++ b/substrate/frame/atomic-swap/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } diff --git a/substrate/frame/aura/Cargo.toml b/substrate/frame/aura/Cargo.toml index e09914862ffd..f8522b658e62 100644 --- a/substrate/frame/aura/Cargo.toml +++ b/substrate/frame/aura/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } pallet-timestamp = { path = "../timestamp", default-features = false } diff --git a/substrate/frame/aura/src/lib.rs b/substrate/frame/aura/src/lib.rs index b2534aa8695a..f829578fb285 100644 --- a/substrate/frame/aura/src/lib.rs +++ b/substrate/frame/aura/src/lib.rs @@ -116,6 +116,7 @@ pub mod pallet { /// The effective value of this type should not change while the chain is running. /// /// For backwards compatibility either use [`MinimumPeriodTimesTwo`] or a const. 
+ #[pallet::constant] type SlotDuration: Get<::Moment>; } diff --git a/substrate/frame/authority-discovery/Cargo.toml b/substrate/frame/authority-discovery/Cargo.toml index 47274f107e1a..59960e89ce68 100644 --- a/substrate/frame/authority-discovery/Cargo.toml +++ b/substrate/frame/authority-discovery/Cargo.toml @@ -16,8 +16,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", +] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } pallet-session = { path = "../session", default-features = false, features = ["historical"] } diff --git a/substrate/frame/authorship/Cargo.toml b/substrate/frame/authorship/Cargo.toml index 4e7ca8502975..5a09fb26fab5 100644 --- a/substrate/frame/authorship/Cargo.toml +++ b/substrate/frame/authorship/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } diff --git a/substrate/frame/babe/Cargo.toml b/substrate/frame/babe/Cargo.toml index b7777df2ac49..f9bf21ded43d 100644 --- a/substrate/frame/babe/Cargo.toml +++ b/substrate/frame/babe/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/babe/src/benchmarking.rs b/substrate/frame/babe/src/benchmarking.rs index 92f55665913e..6b0e31e84718 100644 --- a/substrate/frame/babe/src/benchmarking.rs +++ b/substrate/frame/babe/src/benchmarking.rs @@ -31,7 +31,7 @@ benchmarks! { // NOTE: generated with the test below `test_generate_equivocation_report_blob`. // the output is not deterministic since keys are generated randomly (and therefore // signature content changes). it should not affect the benchmark. - // with the current benchmark setup it is not possible to generate this programatically + // with the current benchmark setup it is not possible to generate this programmatically // from the benchmark setup. 
const EQUIVOCATION_PROOF_BLOB: [u8; 416] = [ 222, 241, 46, 66, 243, 228, 135, 233, 177, 64, 149, 170, 141, 92, 193, 106, 51, 73, 31, diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml index b58141356a12..39054f898853 100644 --- a/substrate/frame/bags-list/Cargo.toml +++ b/substrate/frame/bags-list/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } @@ -33,7 +33,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau # third party log = { workspace = true } -docify = "0.2.7" +docify = "0.2.8" aquamarine = { version = "0.5.0" } # Optional imports for benchmarking diff --git a/substrate/frame/bags-list/src/list/tests.rs b/substrate/frame/bags-list/src/list/tests.rs index fd4ad8f893af..cd39b0831726 100644 --- a/substrate/frame/bags-list/src/list/tests.rs +++ b/substrate/frame/bags-list/src/list/tests.rs @@ -431,7 +431,7 @@ mod list { #[test] fn insert_at_unchecked_at_is_only_node() { // Note that this `insert_at_unchecked` test should fail post checks because node 42 does - // not get re-assigned the correct bagu pper. This is because `insert_at_unchecked` assumes + // not get re-assigned the correct bag upper. This is because `insert_at_unchecked` assumes // both nodes are already in the same bag with the correct bag upper. ExtBuilder::default().build_and_execute_no_post_check(|| { // given diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml index b9b35ddcdf90..995d157ff917 100644 --- a/substrate/frame/balances/Cargo.toml +++ b/substrate/frame/balances/Cargo.toml @@ -18,12 +18,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } -docify = "0.2.6" +docify = "0.2.8" [dev-dependencies] pallet-transaction-payment = { path = "../transaction-payment" } diff --git a/substrate/frame/balances/src/impl_currency.rs b/substrate/frame/balances/src/impl_currency.rs index 24a7bca36c37..454aead1773f 100644 --- a/substrate/frame/balances/src/impl_currency.rs +++ b/substrate/frame/balances/src/impl_currency.rs @@ -28,8 +28,8 @@ use frame_support::{ tokens::{fungible, BalanceStatus as Status, Fortitude::Polite, Precision::BestEffort}, Currency, DefensiveSaturating, ExistenceRequirement, ExistenceRequirement::AllowDeath, - Get, Imbalance, LockIdentifier, LockableCurrency, NamedReservableCurrency, - ReservableCurrency, SignedImbalance, TryDrop, WithdrawReasons, + Get, Imbalance, InspectLockableCurrency, LockIdentifier, LockableCurrency, + NamedReservableCurrency, ReservableCurrency, SignedImbalance, TryDrop, WithdrawReasons, }, }; use frame_system::pallet_prelude::BlockNumberFor; @@ -918,3 +918,12 @@ 
where Self::update_locks(who, &locks[..]); } } + +impl, I: 'static> InspectLockableCurrency for Pallet { + fn balance_locked(id: LockIdentifier, who: &T::AccountId) -> Self::Balance { + Self::locks(who) + .into_iter() + .filter(|l| l.id == id) + .fold(Zero::zero(), |acc, l| acc + l.amount) + } +} diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 89e6b7a46963..23cbb75fc770 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -17,11 +17,15 @@ //! # Balances Pallet //! -//! The Balances pallet provides functionality for handling accounts and balances. +//! The Balances pallet provides functionality for handling accounts and balances for a single +//! token. //! -//! - [`Config`] -//! - [`Call`] -//! - [`Pallet`] +//! It makes heavy use of concepts such as Holds and Freezes from the +//! [`frame_support::traits::fungible`] traits, therefore you should read and understand those docs +//! as a prerequisite to understanding this pallet. +//! +//! Also see the [`frame_tokens`] reference docs for higher-level information regarding the +//! place of this pallet in FRAME. //! //! ## Overview //! @@ -38,42 +42,30 @@ //! //! ### Terminology //! -//! - **Existential Deposit:** The minimum balance required to create or keep an account open. This -//! prevents "dust accounts" from filling storage. When the free plus the reserved balance (i.e. -//! the total balance) fall below this, then the account is said to be dead; and it loses its -//! functionality as well as any prior history and all information on it is removed from the -//! chain's state. No account should ever have a total balance that is strictly between 0 and the -//! existential deposit (exclusive). If this ever happens, it indicates either a bug in this -//! pallet or an erroneous raw mutation of storage. -//! -//! - **Total Issuance:** The total number of units in existence in a system. -//! //! - **Reaping an account:** The act of removing an account by resetting its nonce. Happens after -//! its total balance has become zero (or, strictly speaking, less than the Existential Deposit). -//! -//! - **Free Balance:** The portion of a balance that is not reserved. The free balance is the only -//! balance that matters for most operations. +//! its total balance has become less than the Existential Deposit. //! -//! - **Reserved Balance:** Reserved balance still belongs to the account holder, but is suspended. -//! Reserved balance can still be slashed, but only after all the free balance has been slashed. -//! -//! - **Imbalance:** A condition when some funds were credited or debited without equal and opposite -//! accounting (i.e. a difference between total issuance and account balances). Functions that -//! result in an imbalance will return an object of the `Imbalance` trait that can be managed within -//! your runtime logic. (If an imbalance is simply dropped, it should automatically maintain any -//! book-keeping such as total issuance.) +//! ### Implementations //! -//! - **Lock:** A freeze on a specified amount of an account's free balance until a specified block -//! number. Multiple locks always operate over the same funds, so they "overlay" rather than -//! "stack". +//! The Balances pallet provides implementations for the following [`fungible`] traits. If these +//! traits provide the functionality that you need, then you should avoid tight coupling with the +//! Balances pallet. //! -//! ### Implementations +//! - [`fungible::Inspect`] +//!
- [`fungible::Mutate`] +//! - [`fungible::Unbalanced`] +//! - [`fungible::Balanced`] +//! - [`fungible::BalancedHold`] +//! - [`fungible::InspectHold`] +//! - [`fungible::MutateHold`] +//! - [`fungible::InspectFreeze`] +//! - [`fungible::MutateFreeze`] +//! - [`fungible::Imbalance`] //! -//! The Balances pallet provides implementations for the following traits. If these traits provide -//! the functionality that you need, then you can avoid coupling with the Balances pallet. +//! It also implements the following [`Currency`]-related traits; however, they are deprecated and +//! will eventually be removed. //! -//! - [`Currency`]: Functions for dealing with a -//! fungible assets system. +//! - [`Currency`]: Functions for dealing with a fungible assets system. //! - [`ReservableCurrency`] //! - [`NamedReservableCurrency`](frame_support::traits::NamedReservableCurrency): //! Functions for dealing with assets that can be reserved from an account. @@ -83,14 +75,6 @@ //! imbalances between total issuance in the system and account balances. Must be used when a //! function creates new funds (e.g. a reward) or destroys some funds (e.g. a system fee). //! -//! ## Interface -//! -//! ### Dispatchable Functions -//! -//! - `transfer_allow_death` - Transfer some liquid free balance to another account. -//! - `force_set_balance` - Set the balances of a given account. The origin of this call must be -//! root. -//! //! ## Usage //! //! The following examples show how to use the Balances pallet in your custom pallet. @@ -151,8 +135,11 @@ //! * Total issued balance of all accounts should be less than `Config::Balance::max_value()`. //! * Existential Deposit is set to a value greater than zero. //! -//! Note, you may find the Balances pallet still functions with an ED of zero in some circumstances, -//! however this is not a configuration which is generally supported, nor will it be. +//! Note that you may find the Balances pallet still functions with an ED of zero when the +//! `insecure_zero_ed` cargo feature is enabled. However, this is not a configuration which is +//! generally supported, nor will it be. +//! +//! [`frame_tokens`]: ../polkadot_sdk_docs/reference_docs/frame_tokens/index.html #![cfg_attr(not(feature = "std"), no_std)] @@ -312,10 +299,14 @@ pub mod pallet { /// The maximum number of locks that should exist on an account. /// Not strictly enforced, but used for weight estimation. + /// + /// Use of locks is deprecated in favour of freezes. See `https://github.com/paritytech/substrate/pull/12951/` #[pallet::constant] type MaxLocks: Get; /// The maximum number of named reserves that can exist on an account. + /// + /// Use of reserves is deprecated in favour of holds. See `https://github.com/paritytech/substrate/pull/12951/` #[pallet::constant] type MaxReserves: Get; @@ -459,6 +450,8 @@ pub mod pallet { /// Any liquidity locks on some account balances. /// NOTE: Should only be accessed when setting, changing and freeing a lock. + /// + /// Use of locks is deprecated in favour of freezes. See `https://github.com/paritytech/substrate/pull/12951/` #[pallet::storage] #[pallet::getter(fn locks)] pub type Locks, I: 'static = ()> = StorageMap< @@ -470,6 +463,8 @@ pub mod pallet { >; /// Named reserves on some account balances. + /// + /// Use of reserves is deprecated in favour of holds.
See `https://github.com/paritytech/substrate/pull/12951/` #[pallet::storage] #[pallet::getter(fn reserves)] pub type Reserves, I: 'static = ()> = StorageMap< @@ -681,7 +676,7 @@ pub mod pallet { /// /// This will waive the transaction fee if at least all but 10% of the accounts needed to /// be upgraded. (We let some not have to be upgraded just in order to allow for the - /// possibililty of churn). + /// possibility of churn). #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::upgrade_accounts(who.len() as u32))] pub fn upgrade_accounts( @@ -907,14 +902,14 @@ pub mod pallet { Self::try_mutate_account(who, |a, _| -> Result { Ok(f(a)) }) } - /// Returns `true` when `who` has some providers or `insecure_zero_ed` feature is disnabled. + /// Returns `true` when `who` has some providers or `insecure_zero_ed` feature is disabled. /// Returns `false` otherwise. #[cfg(not(feature = "insecure_zero_ed"))] fn have_providers_or_no_zero_ed(_: &T::AccountId) -> bool { true } - /// Returns `true` when `who` has some providers or `insecure_zero_ed` feature is disnabled. + /// Returns `true` when `who` has some providers or `insecure_zero_ed` feature is disabled. /// Returns `false` otherwise. #[cfg(feature = "insecure_zero_ed")] fn have_providers_or_no_zero_ed(who: &T::AccountId) -> bool { diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs index 46a4c4caefc3..450b1a84aa87 100644 --- a/substrate/frame/balances/src/tests/currency_tests.rs +++ b/substrate/frame/balances/src/tests/currency_tests.rs @@ -24,8 +24,8 @@ use frame_support::{ BalanceStatus::{Free, Reserved}, Currency, ExistenceRequirement::{self, AllowDeath, KeepAlive}, - Hooks, LockIdentifier, LockableCurrency, NamedReservableCurrency, ReservableCurrency, - WithdrawReasons, + Hooks, InspectLockableCurrency, LockIdentifier, LockableCurrency, NamedReservableCurrency, + ReservableCurrency, WithdrawReasons, }, StorageNoopGuard, }; @@ -88,6 +88,24 @@ fn basic_locking_should_work() { }); } +#[test] +fn inspect_lock_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + Balances::set_lock(ID_1, &1, 10, WithdrawReasons::all()); + Balances::set_lock(ID_2, &1, 10, WithdrawReasons::all()); + Balances::set_lock(ID_1, &2, 20, WithdrawReasons::all()); + + assert_eq!(>::balance_locked(ID_1, &1), 10); + assert_eq!(>::balance_locked(ID_2, &1), 10); + assert_eq!(>::balance_locked(ID_1, &2), 20); + assert_eq!(>::balance_locked(ID_2, &2), 0); + assert_eq!(>::balance_locked(ID_1, &3), 0); + }) +} + #[test] fn account_should_be_reaped() { ExtBuilder::default() @@ -1024,7 +1042,7 @@ fn slash_consumed_slash_partial_works() { } #[test] -fn slash_on_non_existant_works() { +fn slash_on_non_existent_works() { ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { // Slash on non-existent account is okay. assert_eq!(Balances::slash(&12345, 1_300), (NegativeImbalance::new(0), 1300)); @@ -1071,7 +1089,7 @@ fn slash_reserved_overslash_does_not_touch_free_balance() { } #[test] -fn slash_reserved_on_non_existant_works() { +fn slash_reserved_on_non_existent_works() { ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { // Slash on non-existent account is okay. 
assert_eq!(Balances::slash_reserved(&12345, 1_300), (NegativeImbalance::new(0), 1300)); diff --git a/substrate/frame/balances/src/tests/reentrancy_tests.rs b/substrate/frame/balances/src/tests/reentrancy_tests.rs index 1afbe82c7e2a..717f04978577 100644 --- a/substrate/frame/balances/src/tests/reentrancy_tests.rs +++ b/substrate/frame/balances/src/tests/reentrancy_tests.rs @@ -44,7 +44,7 @@ fn transfer_dust_removal_tst1_should_work() { assert_eq!(Balances::free_balance(&2), 0); // As expected beneficiary account 3 - // received the transfered fund. + // received the transferred fund. assert_eq!(Balances::free_balance(&3), 450); // Dust balance is deposited to account 1 @@ -123,7 +123,7 @@ fn repatriating_reserved_balance_dust_removal_should_work() { // Reserve a value on account 2, // Such that free balance is lower than - // Exestintial deposit. + // Existential deposit. assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), 1, 450)); // Since free balance of account 2 is lower than diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml index 9e088bae2732..e54b8e946cd2 100644 --- a/substrate/frame/beefy-mmr/Cargo.toml +++ b/substrate/frame/beefy-mmr/Cargo.toml @@ -15,7 +15,7 @@ workspace = true array-bytes = { version = "6.1", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } binary-merkle-tree = { path = "../../utils/binary-merkle-tree", default-features = false } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/beefy-mmr/src/lib.rs b/substrate/frame/beefy-mmr/src/lib.rs index 2f35d9292095..a86be5ddd25c 100644 --- a/substrate/frame/beefy-mmr/src/lib.rs +++ b/substrate/frame/beefy-mmr/src/lib.rs @@ -70,7 +70,7 @@ where ::BeefyId, >::MmrRoot(*root)), ); - >::deposit_log(digest); + frame_system::Pallet::::deposit_log(digest); } } @@ -128,7 +128,6 @@ pub mod pallet { /// Details of current BEEFY authority set. #[pallet::storage] - #[pallet::getter(fn beefy_authorities)] pub type BeefyAuthorities = StorageValue<_, BeefyAuthoritySet>, ValueQuery>; @@ -136,7 +135,6 @@ pub mod pallet { /// /// This storage entry is used as cache for calls to `update_beefy_next_authority_set`. #[pallet::storage] - #[pallet::getter(fn beefy_next_authorities)] pub type BeefyNextAuthorities = StorageValue<_, BeefyNextAuthoritySet>, ValueQuery>; } @@ -154,7 +152,7 @@ impl LeafDataProvider for Pallet { version: T::LeafVersion::get(), parent_number_and_hash: ParentNumberAndHash::::leaf_data(), leaf_extra: T::BeefyDataProvider::extra_data(), - beefy_next_authority_set: Pallet::::beefy_next_authorities(), + beefy_next_authority_set: BeefyNextAuthorities::::get(), } } } @@ -179,12 +177,12 @@ where impl Pallet { /// Return the currently active BEEFY authority set proof. pub fn authority_set_proof() -> BeefyAuthoritySet> { - Pallet::::beefy_authorities() + BeefyAuthorities::::get() } /// Return the next/queued BEEFY authority set proof. pub fn next_authority_set_proof() -> BeefyNextAuthoritySet> { - Pallet::::beefy_next_authorities() + BeefyNextAuthorities::::get() } /// Returns details of a BEEFY authority set. 
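Aside from dependency bumps and typo fixes, the recurring change in the pallet hunks above and below is the removal of the deprecated `#[pallet::getter]` attribute: instead of a macro-generated accessor such as `Pallet::<T>::beefy_next_authorities()`, callers now read the storage type directly, e.g. `BeefyNextAuthorities::<T>::get()`. A minimal sketch of the pattern, on a hypothetical pallet rather than any pallet touched by this diff:

```rust
// Hypothetical example pallet (not part of this PR) illustrating the
// migration away from `#[pallet::getter]`.
#[frame_support::pallet]
pub mod pallet {
	use frame_support::pallet_prelude::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config: frame_system::Config {}

	/// Before: `#[pallet::getter(fn value)]` sat on this item and generated a
	/// `Pallet::<T>::value()` accessor. After: the attribute is simply dropped.
	#[pallet::storage]
	pub type Value<T> = StorageValue<_, u32, ValueQuery>;

	impl<T: Config> Pallet<T> {
		/// Pallet-internal code reads the storage type directly as well.
		pub fn doubled() -> u32 {
			Value::<T>::get().saturating_mul(2)
		}
	}
}
```

Tests change shape the same way, replacing `Pallet::<Test>::value()` with `Value::<Test>::get()` (plus the instance parameter where the pallet is instantiable), which is exactly the form of the test updates throughout this diff.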
diff --git a/substrate/frame/beefy-mmr/src/tests.rs b/substrate/frame/beefy-mmr/src/tests.rs index ec756f83dffa..fac799bf64e4 100644 --- a/substrate/frame/beefy-mmr/src/tests.rs +++ b/substrate/frame/beefy-mmr/src/tests.rs @@ -107,7 +107,7 @@ fn should_contain_valid_leaf_data() { let mut ext = new_test_ext(vec![1, 2, 3, 4]); let parent_hash = ext.execute_with(|| { init_block(1); - >::parent_hash() + frame_system::Pallet::::parent_hash() }); let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(0, parent_hash)); @@ -132,7 +132,7 @@ fn should_contain_valid_leaf_data() { // build second block on top let parent_hash = ext.execute_with(|| { init_block(2); - >::parent_hash() + frame_system::Pallet::::parent_hash() }); let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(1, parent_hash)); diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml index 75168eae06eb..a249d17db5a4 100644 --- a/substrate/frame/beefy/Cargo.toml +++ b/substrate/frame/beefy/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } serde = { optional = true, workspace = true, default-features = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/beefy/src/equivocation.rs b/substrate/frame/beefy/src/equivocation.rs index 2a570b0c88dd..d176e7c0afb7 100644 --- a/substrate/frame/beefy/src/equivocation.rs +++ b/substrate/frame/beefy/src/equivocation.rs @@ -191,7 +191,7 @@ where evidence: EquivocationEvidenceFor, ) -> Result<(), DispatchError> { let (equivocation_proof, key_owner_proof) = evidence; - let reporter = reporter.or_else(|| >::author()); + let reporter = reporter.or_else(|| pallet_authorship::Pallet::::author()); let offender = equivocation_proof.offender_id().clone(); // We check the equivocation within the context of its set id (and diff --git a/substrate/frame/beefy/src/lib.rs b/substrate/frame/beefy/src/lib.rs index 8087b05f0daa..1137d7caa968 100644 --- a/substrate/frame/beefy/src/lib.rs +++ b/substrate/frame/beefy/src/lib.rs @@ -121,20 +121,17 @@ pub mod pallet { /// The current authorities set #[pallet::storage] - #[pallet::getter(fn authorities)] - pub(super) type Authorities = + pub type Authorities = StorageValue<_, BoundedVec, ValueQuery>; /// The current validator set id #[pallet::storage] - #[pallet::getter(fn validator_set_id)] - pub(super) type ValidatorSetId = + pub type ValidatorSetId = StorageValue<_, sp_consensus_beefy::ValidatorSetId, ValueQuery>; /// Authorities set scheduled to be used with the next session #[pallet::storage] - #[pallet::getter(fn next_authorities)] - pub(super) type NextAuthorities = + pub type NextAuthorities = StorageValue<_, BoundedVec, ValueQuery>; /// A mapping from BEEFY set ID to the index of the *most recent* session for which its @@ -148,17 +145,14 @@ pub mod pallet { /// /// TWOX-NOTE: `ValidatorSetId` is not under user control. #[pallet::storage] - #[pallet::getter(fn session_for_set)] - pub(super) type SetIdSession = + pub type SetIdSession = StorageMap<_, Twox64Concat, sp_consensus_beefy::ValidatorSetId, SessionIndex>; /// Block number where BEEFY consensus is enabled/started. 
/// By changing this (through privileged `set_new_genesis()`), BEEFY consensus is effectively /// restarted from the newly set block number. #[pallet::storage] - #[pallet::getter(fn genesis_block)] - pub(super) type GenesisBlock = - StorageValue<_, Option>, ValueQuery>; + pub type GenesisBlock = StorageValue<_, Option>, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig { @@ -187,7 +181,7 @@ pub mod pallet { // we panic here as runtime maintainers can simply reconfigure genesis and restart // the chain easily .expect("Authorities vec too big"); - >::put(&self.genesis_block); + GenesisBlock::::put(&self.genesis_block); } } @@ -287,6 +281,14 @@ pub mod pallet { } } + #[pallet::hooks] + impl Hooks> for Pallet { + #[cfg(feature = "try-runtime")] + fn try_state(_n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state() + } + } + #[pallet::validate_unsigned] impl ValidateUnsigned for Pallet { type Call = Call; @@ -301,11 +303,65 @@ pub mod pallet { } } +#[cfg(any(feature = "try-runtime", test))] +impl Pallet { + /// Ensure the correctness of the state of this pallet. + /// + /// This should be valid before or after each state transition of this pallet. + pub fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + Self::try_state_authorities()?; + Self::try_state_validators()?; + + Ok(()) + } + + /// # Invariants + /// + /// * `Authorities` should not exceed the `MaxAuthorities` capacity. + /// * `NextAuthorities` should not exceed the `MaxAuthorities` capacity. + fn try_state_authorities() -> Result<(), sp_runtime::TryRuntimeError> { + if let Some(authorities_len) = >::decode_len() { + ensure!( + authorities_len as u32 <= T::MaxAuthorities::get(), + "Authorities number exceeds what the pallet config allows." + ); + } else { + return Err(sp_runtime::TryRuntimeError::Other( + "Failed to decode length of authorities", + )); + } + + if let Some(next_authorities_len) = >::decode_len() { + ensure!( + next_authorities_len as u32 <= T::MaxAuthorities::get(), + "Next authorities number exceeds what the pallet config allows." + ); + } else { + return Err(sp_runtime::TryRuntimeError::Other( + "Failed to decode length of next authorities", + )); + } + Ok(()) + } + + /// # Invariants + /// + /// `ValidatorSetId` must be present in `SetIdSession` + fn try_state_validators() -> Result<(), sp_runtime::TryRuntimeError> { + let validator_set_id = >::get(); + ensure!( + SetIdSession::::get(validator_set_id).is_some(), + "Validator set id must be present in SetIdSession" + ); + Ok(()) + } +} + impl Pallet { /// Return the current active BEEFY validator set. 
pub fn validator_set() -> Option> { - let validators: BoundedVec = Self::authorities(); - let id: sp_consensus_beefy::ValidatorSetId = Self::validator_set_id(); + let validators: BoundedVec = Authorities::::get(); + let id: sp_consensus_beefy::ValidatorSetId = ValidatorSetId::::get(); ValidatorSet::::new(validators, id) } @@ -327,19 +383,19 @@ impl Pallet { new: BoundedVec, queued: BoundedVec, ) { - >::put(&new); + Authorities::::put(&new); - let new_id = Self::validator_set_id() + 1u64; - >::put(new_id); + let new_id = ValidatorSetId::::get() + 1u64; + ValidatorSetId::::put(new_id); - >::put(&queued); + NextAuthorities::::put(&queued); if let Some(validator_set) = ValidatorSet::::new(new, new_id) { let log = DigestItem::Consensus( BEEFY_ENGINE_ID, ConsensusLog::AuthoritiesChange(validator_set.clone()).encode(), ); - >::deposit_log(log); + frame_system::Pallet::::deposit_log(log); let next_id = new_id + 1; if let Some(next_validator_set) = ValidatorSet::::new(queued, next_id) { @@ -356,7 +412,7 @@ impl Pallet { return Ok(()) } - if !>::get().is_empty() { + if !Authorities::::get().is_empty() { return Err(()) } @@ -365,10 +421,10 @@ impl Pallet { .map_err(|_| ())?; let id = GENESIS_AUTHORITY_SET_ID; - >::put(bounded_authorities); - >::put(id); + Authorities::::put(bounded_authorities); + ValidatorSetId::::put(id); // Like `pallet_session`, initialize the next validator set as well. - >::put(bounded_authorities); + NextAuthorities::::put(bounded_authorities); if let Some(validator_set) = ValidatorSet::::new(authorities.clone(), id) { let next_id = id + 1; @@ -443,9 +499,9 @@ where // We want to have at least one BEEFY mandatory block per session. Self::change_authorities(bounded_next_authorities, bounded_next_queued_authorities); - let validator_set_id = Self::validator_set_id(); + let validator_set_id = ValidatorSetId::::get(); // Update the mapping for the new set id that corresponds to the latest session (i.e. now). - let session_index = >::current_index(); + let session_index = pallet_session::Pallet::::current_index(); SetIdSession::::insert(validator_set_id, &session_index); // Prune old entry if limit reached. let max_set_id_session_entries = T::MaxSetIdSessionEntries::get().max(1); @@ -460,13 +516,13 @@ where ConsensusLog::::OnDisabled(i as AuthorityIndex).encode(), ); - >::deposit_log(log); + frame_system::Pallet::::deposit_log(log); } } impl IsMember for Pallet { fn is_member(authority_id: &T::BeefyId) -> bool { - Self::authorities().iter().any(|id| id == authority_id) + Authorities::::get().iter().any(|id| id == authority_id) } } diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index fccc63bd1b45..1c55adc8de4b 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -27,7 +27,6 @@ use frame_support::{ }; use pallet_session::historical as pallet_session_historical; use sp_core::{crypto::KeyTypeId, ConstU128}; -use sp_io::TestExternalities; use sp_runtime::{ app_crypto::ecdsa::Public, curve::PiecewiseLinear, impl_opaque_keys, testing::TestXt, traits::OpaqueKeys, BuildStorage, Perbill, }; @@ -210,6 +209,73 @@ impl pallet_offences::Config for Test { type OnOffenceHandler = Staking; } +#[derive(Default)] +pub struct ExtBuilder { + authorities: Vec, +} + +impl ExtBuilder { + /// Add some authority IDs to use as the initial BEEFY authorities.
+ #[cfg(test)] + pub(crate) fn add_authorities(mut self, ids: Vec) -> Self { + self.authorities = ids; + self + } + + pub fn build(self) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + let balances: Vec<_> = + (0..self.authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); + + pallet_balances::GenesisConfig:: { balances } + .assimilate_storage(&mut t) + .unwrap(); + + let session_keys: Vec<_> = self + .authorities + .iter() + .enumerate() + .map(|(i, k)| (i as u64, i as u64, MockSessionKeys { dummy: k.clone() })) + .collect(); + + BasicExternalities::execute_with_storage(&mut t, || { + for (ref id, ..) in &session_keys { + frame_system::Pallet::::inc_providers(id); + } + }); + + pallet_session::GenesisConfig:: { keys: session_keys } + .assimilate_storage(&mut t) + .unwrap(); + + // controllers are the same as stash + let stakers: Vec<_> = (0..self.authorities.len()) + .map(|i| (i as u64, i as u64, 10_000, pallet_staking::StakerStatus::::Validator)) + .collect(); + + let staking_config = pallet_staking::GenesisConfig:: { + stakers, + validator_count: 2, + force_era: pallet_staking::Forcing::ForceNew, + minimum_validator_count: 0, + invulnerables: vec![], + ..Default::default() + }; + + staking_config.assimilate_storage(&mut t).unwrap(); + + t.into() + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + self.build().execute_with(|| { + test(); + Beefy::do_try_state().expect("All invariants must hold after a test"); + }) + } +} + // Note that we can't use `UintAuthorityId` here. The reason is that the implementation // of `to_public_key()` assumes that a public key is 32 bytes long. This is true for // ed25519 and sr25519 but *not* for ecdsa. A compressed ecdsa public key is 33 bytes, @@ -226,54 +292,6 @@ pub fn mock_authorities(vec: Vec) -> Vec { vec.into_iter().map(|id| mock_beefy_id(id)).collect() } -pub fn new_test_ext(ids: Vec) -> TestExternalities { - new_test_ext_raw_authorities(mock_authorities(ids)) -} - -pub fn new_test_ext_raw_authorities(authorities: Vec) -> TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - - let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); - - pallet_balances::GenesisConfig:: { balances } - .assimilate_storage(&mut t) - .unwrap(); - - let session_keys: Vec<_> = authorities - .iter() - .enumerate() - .map(|(i, k)| (i as u64, i as u64, MockSessionKeys { dummy: k.clone() })) - .collect(); - - BasicExternalities::execute_with_storage(&mut t, || { - for (ref id, ..)
in &session_keys { - frame_system::Pallet::::inc_providers(id); - } - }); - - pallet_session::GenesisConfig:: { keys: session_keys } - .assimilate_storage(&mut t) - .unwrap(); - - // controllers are same as stash - let stakers: Vec<_> = (0..authorities.len()) - .map(|i| (i as u64, i as u64, 10_000, pallet_staking::StakerStatus::::Validator)) - .collect(); - - let staking_config = pallet_staking::GenesisConfig:: { - stakers, - validator_count: 2, - force_era: pallet_staking::Forcing::ForceNew, - minimum_validator_count: 0, - invulnerables: vec![], - ..Default::default() - }; - - staking_config.assimilate_storage(&mut t).unwrap(); - - t.into() -} - pub fn start_session(session_index: SessionIndex) { for i in Session::current_index()..session_index { System::on_finalize(System::block_number()); diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs index 453cf19a4fe1..6a6aa245ce1f 100644 --- a/substrate/frame/beefy/src/tests.rs +++ b/substrate/frame/beefy/src/tests.rs @@ -31,7 +31,7 @@ use sp_consensus_beefy::{ }; use sp_runtime::DigestItem; -use crate::{mock::*, Call, Config, Error, Weight, WeightInfo}; +use crate::{self as beefy, mock::*, Call, Config, Error, Weight, WeightInfo}; fn init_block(block: u64) { System::set_block_number(block); @@ -47,16 +47,16 @@ fn genesis_session_initializes_authorities() { let authorities = mock_authorities(vec![1, 2, 3, 4]); let want = authorities.clone(); - new_test_ext_raw_authorities(authorities).execute_with(|| { - let authorities = Beefy::authorities(); + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { + let authorities = beefy::Authorities::::get(); assert_eq!(authorities.len(), 4); assert_eq!(want[0], authorities[0]); assert_eq!(want[1], authorities[1]); - assert!(Beefy::validator_set_id() == 0); + assert!(beefy::ValidatorSetId::::get() == 0); - let next_authorities = Beefy::next_authorities(); + let next_authorities = beefy::NextAuthorities::::get(); assert_eq!(next_authorities.len(), 4); assert_eq!(want[0], next_authorities[0]); @@ -69,130 +69,140 @@ fn session_change_updates_authorities() { let authorities = mock_authorities(vec![1, 2, 3, 4]); let want_validators = authorities.clone(); - new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { - assert!(0 == Beefy::validator_set_id()); + ExtBuilder::default() + .add_authorities(mock_authorities(vec![1, 2, 3, 4])) + .build_and_execute(|| { + assert!(0 == beefy::ValidatorSetId::::get()); - init_block(1); + init_block(1); - assert!(1 == Beefy::validator_set_id()); + assert!(1 == beefy::ValidatorSetId::::get()); - let want = beefy_log(ConsensusLog::AuthoritiesChange( - ValidatorSet::new(want_validators, 1).unwrap(), - )); + let want = beefy_log(ConsensusLog::AuthoritiesChange( + ValidatorSet::new(want_validators, 1).unwrap(), + )); - let log = System::digest().logs[0].clone(); - assert_eq!(want, log); + let log = System::digest().logs[0].clone(); + assert_eq!(want, log); - init_block(2); + init_block(2); - assert!(2 == Beefy::validator_set_id()); + assert!(2 == beefy::ValidatorSetId::::get()); - let want = beefy_log(ConsensusLog::AuthoritiesChange( - ValidatorSet::new(vec![mock_beefy_id(2), mock_beefy_id(4)], 2).unwrap(), - )); + let want = beefy_log(ConsensusLog::AuthoritiesChange( + ValidatorSet::new(vec![mock_beefy_id(2), mock_beefy_id(4)], 2).unwrap(), + )); - let log = System::digest().logs[1].clone(); - assert_eq!(want, log); - }); + let log = System::digest().logs[1].clone(); + assert_eq!(want, log); + }); } #[test] fn 
 fn session_change_updates_next_authorities() {
	let want = vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)];

-	new_test_ext(vec![1, 2, 3, 4]).execute_with(|| {
-		let next_authorities = Beefy::next_authorities();
+	ExtBuilder::default()
+		.add_authorities(mock_authorities(vec![1, 2, 3, 4]))
+		.build_and_execute(|| {
+			let next_authorities = beefy::NextAuthorities::<Test>::get();

-		assert_eq!(next_authorities.len(), 4);
-		assert_eq!(want[0], next_authorities[0]);
-		assert_eq!(want[1], next_authorities[1]);
-		assert_eq!(want[2], next_authorities[2]);
-		assert_eq!(want[3], next_authorities[3]);
+			assert_eq!(next_authorities.len(), 4);
+			assert_eq!(want[0], next_authorities[0]);
+			assert_eq!(want[1], next_authorities[1]);
+			assert_eq!(want[2], next_authorities[2]);
+			assert_eq!(want[3], next_authorities[3]);

-		init_block(1);
+			init_block(1);

-		let next_authorities = Beefy::next_authorities();
+			let next_authorities = beefy::NextAuthorities::<Test>::get();

-		assert_eq!(next_authorities.len(), 2);
-		assert_eq!(want[1], next_authorities[0]);
-		assert_eq!(want[3], next_authorities[1]);
-	});
+			assert_eq!(next_authorities.len(), 2);
+			assert_eq!(want[1], next_authorities[0]);
+			assert_eq!(want[3], next_authorities[1]);
+		});
 }

 #[test]
 fn validator_set_at_genesis() {
	let want = vec![mock_beefy_id(1), mock_beefy_id(2)];

-	new_test_ext(vec![1, 2, 3, 4]).execute_with(|| {
-		let vs = Beefy::validator_set().unwrap();
+	ExtBuilder::default()
+		.add_authorities(mock_authorities(vec![1, 2, 3, 4]))
+		.build_and_execute(|| {
+			let vs = Beefy::validator_set().unwrap();

-		assert_eq!(vs.id(), 0u64);
-		assert_eq!(vs.validators()[0], want[0]);
-		assert_eq!(vs.validators()[1], want[1]);
-	});
+			assert_eq!(vs.id(), 0u64);
+			assert_eq!(vs.validators()[0], want[0]);
+			assert_eq!(vs.validators()[1], want[1]);
+		});
 }

 #[test]
 fn validator_set_updates_work() {
	let want = vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)];

-	new_test_ext(vec![1, 2, 3, 4]).execute_with(|| {
-		let vs = Beefy::validator_set().unwrap();
-		assert_eq!(vs.id(), 0u64);
-		assert_eq!(want[0], vs.validators()[0]);
-		assert_eq!(want[1], vs.validators()[1]);
-		assert_eq!(want[2], vs.validators()[2]);
-		assert_eq!(want[3], vs.validators()[3]);
+	ExtBuilder::default()
+		.add_authorities(mock_authorities(vec![1, 2, 3, 4]))
+		.build_and_execute(|| {
+			let vs = Beefy::validator_set().unwrap();
+			assert_eq!(vs.id(), 0u64);
+			assert_eq!(want[0], vs.validators()[0]);
+			assert_eq!(want[1], vs.validators()[1]);
+			assert_eq!(want[2], vs.validators()[2]);
+			assert_eq!(want[3], vs.validators()[3]);

-		init_block(1);
+			init_block(1);

-		let vs = Beefy::validator_set().unwrap();
+			let vs = Beefy::validator_set().unwrap();

-		assert_eq!(vs.id(), 1u64);
-		assert_eq!(want[0], vs.validators()[0]);
-		assert_eq!(want[1], vs.validators()[1]);
+			assert_eq!(vs.id(), 1u64);
+			assert_eq!(want[0], vs.validators()[0]);
+			assert_eq!(want[1], vs.validators()[1]);

-		init_block(2);
+			init_block(2);

-		let vs = Beefy::validator_set().unwrap();
+			let vs = Beefy::validator_set().unwrap();

-		assert_eq!(vs.id(), 2u64);
-		assert_eq!(want[1], vs.validators()[0]);
-		assert_eq!(want[3], vs.validators()[1]);
-	});
+			assert_eq!(vs.id(), 2u64);
+			assert_eq!(want[1], vs.validators()[0]);
+			assert_eq!(want[3], vs.validators()[1]);
+		});
 }
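The other recurring change in this file is the move away from the generated pallet getters to direct reads of the storage items through the `beefy` crate alias. A schematic before/after (statement fragments only, names as used in these tests):

```rust
// Old: getter methods generated on the Pallet struct.
let id = Beefy::validator_set_id();
let session = Beefy::session_for_set(id);

// New: the storage types themselves, read via `StorageValue`/`StorageMap` APIs.
let id = beefy::ValidatorSetId::<Test>::get();
let session = beefy::SetIdSession::<Test>::get(id);
```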
 #[test]
 fn cleans_up_old_set_id_session_mappings() {
-	new_test_ext(vec![1, 2, 3, 4]).execute_with(|| {
-		let max_set_id_session_entries = MaxSetIdSessionEntries::get();
-
-		// we have 3 sessions per era
-		let era_limit = max_set_id_session_entries / 3;
-		// sanity check against division precision loss
-		assert_eq!(0, max_set_id_session_entries % 3);
-		// go through `max_set_id_session_entries` sessions
-		start_era(era_limit);
-
-		// we should have a session id mapping for all the set ids from
-		// `max_set_id_session_entries` eras we have observed
-		for i in 1..=max_set_id_session_entries {
-			assert!(Beefy::session_for_set(i as u64).is_some());
-		}
+	ExtBuilder::default()
+		.add_authorities(mock_authorities(vec![1, 2, 3, 4]))
+		.build_and_execute(|| {
+			let max_set_id_session_entries = MaxSetIdSessionEntries::get();
+
+			// we have 3 sessions per era
+			let era_limit = max_set_id_session_entries / 3;
+			// sanity check against division precision loss
+			assert_eq!(0, max_set_id_session_entries % 3);
+			// go through `max_set_id_session_entries` sessions
+			start_era(era_limit);
+
+			// we should have a session id mapping for all the set ids from
+			// `max_set_id_session_entries` eras we have observed
+			for i in 1..=max_set_id_session_entries {
+				assert!(beefy::SetIdSession::<Test>::get(i as u64).is_some());
+			}

-		// go through another `max_set_id_session_entries` sessions
-		start_era(era_limit * 2);
+			// go through another `max_set_id_session_entries` sessions
+			start_era(era_limit * 2);

-		// we should keep tracking the new mappings for new sessions
-		for i in max_set_id_session_entries + 1..=max_set_id_session_entries * 2 {
-			assert!(Beefy::session_for_set(i as u64).is_some());
-		}
+			// we should keep tracking the new mappings for new sessions
+			for i in max_set_id_session_entries + 1..=max_set_id_session_entries * 2 {
+				assert!(beefy::SetIdSession::<Test>::get(i as u64).is_some());
+			}

-		// but the old ones should have been pruned by now
-		for i in 1..=max_set_id_session_entries {
-			assert!(Beefy::session_for_set(i as u64).is_none());
-		}
-	});
+			// but the old ones should have been pruned by now
+			for i in 1..=max_set_id_session_entries {
+				assert!(beefy::SetIdSession::<Test>::get(i as u64).is_none());
+			}
+		});
 }

 /// Returns a list with 3 authorities with known keys:
@@ -259,7 +269,7 @@ fn should_sign_and_verify() {
 fn report_equivocation_current_set_works() {
	let authorities = test_authorities();

-	new_test_ext_raw_authorities(authorities).execute_with(|| {
+	ExtBuilder::default().add_authorities(authorities).build_and_execute(|| {
		assert_eq!(Staking::current_era(), Some(0));
		assert_eq!(Session::current_index(), 0);
@@ -339,7 +349,7 @@ fn report_equivocation_current_set_works() {
 fn report_equivocation_old_set_works() {
	let authorities = test_authorities();

-	new_test_ext_raw_authorities(authorities).execute_with(|| {
+	ExtBuilder::default().add_authorities(authorities).build_and_execute(|| {
		start_era(1);

		let block_num = System::block_number();
@@ -422,7 +432,7 @@ fn report_equivocation_old_set_works() {
 fn report_equivocation_invalid_set_id() {
	let authorities = test_authorities();

-	new_test_ext_raw_authorities(authorities).execute_with(|| {
+	ExtBuilder::default().add_authorities(authorities).build_and_execute(|| {
		start_era(1);

		let block_num = System::block_number();
@@ -460,7 +470,7 @@ fn report_equivocation_invalid_set_id() {
 fn report_equivocation_invalid_session() {
	let authorities = test_authorities();

-	new_test_ext_raw_authorities(authorities).execute_with(|| {
+	ExtBuilder::default().add_authorities(authorities).build_and_execute(|| {
		start_era(1);

		let block_num = System::block_number();
@@ -503,7 +513,7 @@ fn report_equivocation_invalid_session()
 fn report_equivocation_invalid_key_owner_proof() {
	let authorities = test_authorities();

-	new_test_ext_raw_authorities(authorities).execute_with(|| {
+	ExtBuilder::default().add_authorities(authorities).build_and_execute(|| {
		start_era(1);

		let block_num = System::block_number();
@@ -551,7 +561,7 @@ fn report_equivocation_invalid_key_owner_proof() {
 fn report_equivocation_invalid_equivocation_proof() {
	let authorities = test_authorities();

-	new_test_ext_raw_authorities(authorities).execute_with(|| {
+	ExtBuilder::default().add_authorities(authorities).build_and_execute(|| {
		start_era(1);

		let block_num = System::block_number();
@@ -624,7 +634,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() {
	let authorities = test_authorities();

-	new_test_ext_raw_authorities(authorities).execute_with(|| {
+	ExtBuilder::default().add_authorities(authorities).build_and_execute(|| {
		start_era(1);

		let block_num = System::block_number();
@@ -728,7 +738,7 @@ fn report_equivocation_has_valid_weight() {
 fn valid_equivocation_reports_dont_pay_fees() {
	let authorities = test_authorities();

-	new_test_ext_raw_authorities(authorities).execute_with(|| {
+	ExtBuilder::default().add_authorities(authorities).build_and_execute(|| {
		start_era(1);

		let block_num = System::block_number();
@@ -796,7 +806,7 @@ fn valid_equivocation_reports_dont_pay_fees() {
 fn set_new_genesis_works() {
	let authorities = test_authorities();

-	new_test_ext_raw_authorities(authorities).execute_with(|| {
+	ExtBuilder::default().add_authorities(authorities).build_and_execute(|| {
		start_era(1);

		let new_genesis_delay = 10u64;
@@ -804,7 +814,7 @@ fn set_new_genesis_works() {
		assert_ok!(Beefy::set_new_genesis(RuntimeOrigin::root(), new_genesis_delay,));
		let expected = System::block_number() + new_genesis_delay;
		// verify new genesis was set
-		assert_eq!(Beefy::genesis_block(), Some(expected));
+		assert_eq!(beefy::GenesisBlock::<Test>::get(), Some(expected));

		// setting delay < 1 should fail
		assert_err!(
diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml
index dc80ad310e60..9dcf73488c1d 100644
--- a/substrate/frame/benchmarking/Cargo.toml
+++ b/substrate/frame/benchmarking/Cargo.toml
@@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
 linregress = { version = "0.5.1", optional = true }
 log = { workspace = true }
 paste = "1.0"
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { optional = true, workspace = true, default-features = true }
 frame-support = { path = "../support", default-features = false }
 frame-support-procedural = { path = "../support/procedural", default-features = false }
diff --git a/substrate/frame/benchmarking/README.md b/substrate/frame/benchmarking/README.md
index bf0bde2c3df5..0b3680e11546 100644
--- a/substrate/frame/benchmarking/README.md
+++ b/substrate/frame/benchmarking/README.md
@@ -177,8 +177,8 @@ requirements.

 The benchmarking CLI uses a Handlebars template to format the final output file. You can optionally
 pass the flag `--template` pointing to a custom template that can be used instead. Within the
 template, you have access to all the data provided by the `TemplateData` struct in the [benchmarking CLI
-writer](../../utils/frame/benchmarking-cli/src/writer.rs). You can find the default template used
-[here](../../utils/frame/benchmarking-cli/src/template.hbs).
+writer](../../utils/frame/benchmarking-cli/src/pallet/writer.rs). You can find the default template used
+[here](../../utils/frame/benchmarking-cli/src/pallet/template.hbs).

 There are some custom Handlebars helpers included with our output generation:

diff --git a/substrate/frame/benchmarking/pov/Cargo.toml b/substrate/frame/benchmarking/pov/Cargo.toml
index 9acafab43cd4..41eb61c81570 100644
--- a/substrate/frame/benchmarking/pov/Cargo.toml
+++ b/substrate/frame/benchmarking/pov/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "..", default-features = false }
 frame-support = { path = "../../support", default-features = false }
 frame-system = { path = "../../system", default-features = false }
diff --git a/substrate/frame/benchmarking/pov/src/benchmarking.rs b/substrate/frame/benchmarking/pov/src/benchmarking.rs
index cd6d262ebe57..bf3d406d0b2b 100644
--- a/substrate/frame/benchmarking/pov/src/benchmarking.rs
+++ b/substrate/frame/benchmarking/pov/src/benchmarking.rs
@@ -21,54 +21,78 @@
 use super::*;
+use frame_benchmarking::v2::*;
 use frame_support::traits::UnfilteredDispatchable;
 use frame_system::{Pallet as System, RawOrigin};
 use sp_runtime::traits::Hash;

-frame_benchmarking::benchmarks! {
-	storage_single_value_read {
+#[benchmarks]
+mod benchmarks {
+	use super::*;
+
+	#[benchmark]
+	fn storage_single_value_read() {
		Value::<T>::put(123);
-	}: {
-		assert_eq!(Value::<T>::get(), Some(123));
+
+		#[block]
+		{
+			assert_eq!(Value::<T>::get(), Some(123));
+		}
	}

-	#[pov_mode = Ignored]
-	storage_single_value_ignored_read {
+	#[benchmark(pov_mode = Ignored)]
+	fn storage_single_value_ignored_read() {
		Value::<T>::put(123);
-	}: {
-		assert_eq!(Value::<T>::get(), Some(123));
+		#[block]
+		{
+			assert_eq!(Value::<T>::get(), Some(123));
+		}
	}

-	#[pov_mode = MaxEncodedLen {
+	#[benchmark(pov_mode = MaxEncodedLen {
		Pov::Value2: Ignored
-	}]
-	storage_single_value_ignored_some_read {
+	})]
+	fn storage_single_value_ignored_some_read() {
		Value::<T>::put(123);
		Value2::<T>::put(123);
-	}: {
-		assert_eq!(Value::<T>::get(), Some(123));
-		assert_eq!(Value2::<T>::get(), Some(123));
+
+		#[block]
+		{
+			assert_eq!(Value::<T>::get(), Some(123));
+			assert_eq!(Value2::<T>::get(), Some(123));
+		}
	}

-	storage_single_value_read_twice {
+	#[benchmark]
+	fn storage_single_value_read_twice() {
		Value::<T>::put(123);
-	}: {
-		assert_eq!(Value::<T>::get(), Some(123));
-		assert_eq!(Value::<T>::get(), Some(123));
+
+		#[block]
+		{
+			assert_eq!(Value::<T>::get(), Some(123));
+			assert_eq!(Value::<T>::get(), Some(123));
+		}
	}

-	storage_single_value_write {
-	}: {
-		Value::<T>::put(123);
-	} verify {
+	#[benchmark]
+	fn storage_single_value_write() {
+		#[block]
+		{
+			Value::<T>::put(123);
+		}
+
		assert_eq!(Value::<T>::get(), Some(123));
	}

-	storage_single_value_kill {
+	#[benchmark]
+	fn storage_single_value_kill() {
		Value::<T>::put(123);
-	}: {
-		Value::<T>::kill();
-	} verify {
+
+		#[block]
+		{
+			Value::<T>::kill();
+		}
+
		assert!(!Value::<T>::exists());
	}
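The v1 → v2 migration pattern in this file is mechanical: the `benchmarks! { .. }` macro becomes a `#[benchmarks]` module, each benchmark becomes a free function, component ranges move from `let n in A .. B;` into `Linear<A, B>` parameters, the measured section moves into `#[block]`, and the old `verify { .. }` section becomes plain assertions after the block. Schematically (`my_bench` is a hypothetical name, not part of this diff):

```rust
// v1 (removed):
//     my_bench {
//         let n in 0 .. 100;      // component range
//         /* setup */
//     }: { /* measured code */ }
//     verify { /* post-conditions */ }
//
// v2 (added):
#[benchmark]
fn my_bench(n: Linear<0, 100>) {
    /* setup, executed outside the measurement */

    #[block]
    {
        /* only this block is measured */
    }

    /* post-conditions, replacing `verify` */
}
```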
@@ -78,263 +102,297 @@ frame_benchmarking::benchmarks! {
	// created. Then the one value is read from the map. This demonstrates that the number of other
	// nodes in the Trie influences the proof size. The number of inserted nodes can be interpreted
	// as the number of `StorageMap`/`StorageValue` in the whole runtime.
-	#[pov_mode = Measured]
-	storage_1m_map_read_one_value_two_additional_layers {
-		(0..(1<<10)).for_each(|i| Map1M::<T>::insert(i, i));
+	#[benchmark(pov_mode = Measured)]
+	fn storage_1m_map_read_one_value_two_additional_layers() {
+		(0..(1 << 10)).for_each(|i| Map1M::<T>::insert(i, i));
		// Assume there are 16-256 other storage items.
-		(0..(1u32<<4)).for_each(|i| {
+		(0..(1u32 << 4)).for_each(|i| {
			let k = T::Hashing::hash(&i.to_be_bytes());
			frame_support::storage::unhashed::put(k.as_ref(), &i);
		});
-	}: {
-		assert_eq!(Map1M::<T>::get(1<<9), Some(1<<9));
+
+		#[block]
+		{
+			assert_eq!(Map1M::<T>::get(1 << 9), Some(1 << 9));
+		}
	}

-	#[pov_mode = Measured]
-	storage_1m_map_read_one_value_three_additional_layers {
-		(0..(1<<10)).for_each(|i| Map1M::<T>::insert(i, i));
+	#[benchmark(pov_mode = Measured)]
+	fn storage_1m_map_read_one_value_three_additional_layers() {
+		(0..(1 << 10)).for_each(|i| Map1M::<T>::insert(i, i));
		// Assume there are 256-4096 other storage items.
-		(0..(1u32<<8)).for_each(|i| {
+		(0..(1u32 << 8)).for_each(|i| {
			let k = T::Hashing::hash(&i.to_be_bytes());
			frame_support::storage::unhashed::put(k.as_ref(), &i);
		});
-	}: {
-		assert_eq!(Map1M::<T>::get(1<<9), Some(1<<9));
+
+		#[block]
+		{
+			assert_eq!(Map1M::<T>::get(1 << 9), Some(1 << 9));
+		}
	}

-	#[pov_mode = Measured]
-	storage_1m_map_read_one_value_four_additional_layers {
-		(0..(1<<10)).for_each(|i| Map1M::<T>::insert(i, i));
+	#[benchmark(pov_mode = Measured)]
+	fn storage_1m_map_read_one_value_four_additional_layers() {
+		(0..(1 << 10)).for_each(|i| Map1M::<T>::insert(i, i));
		// Assume there are 4096-65536 other storage items.
-		(0..(1u32<<12)).for_each(|i| {
+		(0..(1u32 << 12)).for_each(|i| {
			let k = T::Hashing::hash(&i.to_be_bytes());
			frame_support::storage::unhashed::put(k.as_ref(), &i);
		});
-	}: {
-		assert_eq!(Map1M::<T>::get(1<<9), Some(1<<9));
+
+		#[block]
+		{
+			assert_eq!(Map1M::<T>::get(1 << 9), Some(1 << 9));
+		}
	}

	// Reads from both storage maps each `n` and `m` times. Should result in two linear components.
-	storage_map_read_per_component {
-		let n in 0 .. 100;
-		let m in 0 .. 100;
+	#[benchmark]
+	fn storage_map_read_per_component(n: Linear<0, 100>, m: Linear<0, 100>) {
+		(0..m * 10).for_each(|i| Map1M::<T>::insert(i, i));
+		(0..n * 10).for_each(|i| Map16M::<T>::insert(i, i));

-		(0..m*10).for_each(|i| Map1M::<T>::insert(i, i));
-		(0..n*10).for_each(|i| Map16M::<T>::insert(i, i));
-	}: {
-		(0..m).for_each(|i|
-			assert_eq!(Map1M::<T>::get(i*10), Some(i*10)));
-		(0..n).for_each(|i|
-			assert_eq!(Map16M::<T>::get(i*10), Some(i*10)));
+		#[block]
+		{
+			(0..m).for_each(|i| assert_eq!(Map1M::<T>::get(i * 10), Some(i * 10)));
+			(0..n).for_each(|i| assert_eq!(Map16M::<T>::get(i * 10), Some(i * 10)));
+		}
	}

-	#[pov_mode = MaxEncodedLen {
+	#[benchmark(pov_mode = MaxEncodedLen {
		Pov::Map1M: Ignored
-	}]
-	storage_map_read_per_component_one_ignored {
-		let n in 0 .. 100;
-		let m in 0 .. 100;
+	})]
+	fn storage_map_read_per_component_one_ignored(n: Linear<0, 100>, m: Linear<0, 100>) {
+		(0..m * 10).for_each(|i| Map1M::<T>::insert(i, i));
+		(0..n * 10).for_each(|i| Map16M::<T>::insert(i, i));

-		(0..m*10).for_each(|i| Map1M::<T>::insert(i, i));
-		(0..n*10).for_each(|i| Map16M::<T>::insert(i, i));
-	}: {
-		(0..m).for_each(|i|
-			assert_eq!(Map1M::<T>::get(i*10), Some(i*10)));
-		(0..n).for_each(|i|
-			assert_eq!(Map16M::<T>::get(i*10), Some(i*10)));
+		#[block]
+		{
+			(0..m).for_each(|i| assert_eq!(Map1M::<T>::get(i * 10), Some(i * 10)));
+			(0..n).for_each(|i| assert_eq!(Map16M::<T>::get(i * 10), Some(i * 10)));
+		}
	}
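Each `Linear<A, B>` parameter above becomes an independent regression component in the generated weight function, i.e. a base weight plus one fitted term per component. Abridged from the regenerated `weights.rs` later in this diff (the `T::DbWeight` read terms and per-component proof-size terms are elided here):

```rust
// Fitted result for the two-component benchmark above: one
// `saturating_mul` term per component `n` and `m`.
fn storage_map_read_per_component(n: u32, m: u32) -> Weight {
    Weight::from_parts(179_688_624, 990)
        .saturating_add(Weight::from_parts(2_061_828, 0).saturating_mul(n.into()))
        .saturating_add(Weight::from_parts(1_825_923, 0).saturating_mul(m.into()))
}
```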
	// Reads the same value from a storage map. Should not result in a component.
-	storage_1m_map_one_entry_repeated_read {
-		let n in 0 .. 100;
+	#[benchmark]
+	fn storage_1m_map_one_entry_repeated_read(n: Linear<0, 100>) {
		Map1M::<T>::insert(0, 0);
-	}: {
-		(0..n).for_each(|i|
-			assert_eq!(Map1M::<T>::get(0), Some(0)));
+
+		#[block]
+		{
+			(0..n).for_each(|_| assert_eq!(Map1M::<T>::get(0), Some(0)));
+		}
	}

	// Reads the same values from a storage map. Should result in a `1x` linear component.
-	storage_1m_map_multiple_entry_repeated_read {
-		let n in 0 .. 100;
+	#[benchmark]
+	fn storage_1m_map_multiple_entry_repeated_read(n: Linear<0, 100>) {
		(0..n).for_each(|i| Map1M::<T>::insert(i, i));
-	}: {
-		(0..n).for_each(|i| {
-			// Reading the same value 10 times does nothing.
-			(0..10).for_each(|j|
-				assert_eq!(Map1M::<T>::get(i), Some(i)));
-		});
+
+		#[block]
+		{
+			(0..n).for_each(|i| {
+				// Reading the same value 10 times does nothing.
+				(0..10).for_each(|_| assert_eq!(Map1M::<T>::get(i), Some(i)));
+			});
+		}
	}

-	storage_1m_double_map_read_per_component {
-		let n in 0 .. 1024;
-		(0..(1<<10)).for_each(|i| DoubleMap1M::<T>::insert(i, i, i));
-	}: {
-		(0..n).for_each(|i|
-			assert_eq!(DoubleMap1M::<T>::get(i, i), Some(i)));
+	#[benchmark]
+	fn storage_1m_double_map_read_per_component(n: Linear<0, 1024>) {
+		(0..(1 << 10)).for_each(|i| DoubleMap1M::<T>::insert(i, i, i));
+
+		#[block]
+		{
+			(0..n).for_each(|i| assert_eq!(DoubleMap1M::<T>::get(i, i), Some(i)));
+		}
	}

-	storage_value_bounded_read {
-	}: {
-		assert!(BoundedValue::<T>::get().is_none());
+	#[benchmark]
+	fn storage_value_bounded_read() {
+		#[block]
+		{
+			assert!(BoundedValue::<T>::get().is_none());
+		}
	}

	// Reading unbounded values will produce no mathematical worst case PoV size for this component.
-	storage_value_unbounded_read {
-	}: {
-		assert!(UnboundedValue::<T>::get().is_none());
+	#[benchmark]
+	fn storage_value_unbounded_read() {
+		#[block]
+		{
+			assert!(UnboundedValue::<T>::get().is_none());
+		}
	}

-	#[pov_mode = Ignored]
-	storage_value_unbounded_ignored_read {
-	}: {
-		assert!(UnboundedValue::<T>::get().is_none());
+	#[benchmark(pov_mode = Ignored)]
+	fn storage_value_unbounded_ignored_read() {
+		#[block]
+		{
+			assert!(UnboundedValue::<T>::get().is_none());
+		}
	}

	// Same as above, but we still expect a mathematical worst case PoV size for the bounded one.
-	storage_value_bounded_and_unbounded_read {
+	#[benchmark]
+	fn storage_value_bounded_and_unbounded_read() {
		(0..1024).for_each(|i| Map1M::<T>::insert(i, i));
-	}: {
-		assert!(UnboundedValue::<T>::get().is_none());
-		assert!(BoundedValue::<T>::get().is_none());
+		#[block]
+		{
+			assert!(UnboundedValue::<T>::get().is_none());
+			assert!(BoundedValue::<T>::get().is_none());
+		}
	}
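The benchmarks that follow exercise the three PoV accounting modes. In v2 syntax, `pov_mode` becomes an argument of the `#[benchmark]` attribute, and the per-storage override block keeps its old inner syntax. A schematic summary of the forms used in this file (attribute lines only, not compilable on their own):

```rust
#[benchmark(pov_mode = Measured)]       // charge the actually recorded proof size
#[benchmark(pov_mode = MaxEncodedLen)]  // charge the mathematical worst case (the default)
#[benchmark(pov_mode = Ignored)]        // contribute nothing to the proof size
#[benchmark(pov_mode = MaxEncodedLen {
	Pov::LargeValue2: Measured          // per-storage-item override within one benchmark
})]
```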
-	#[pov_mode = Measured]
-	measured_storage_value_read_linear_size {
-		let l in 0 .. 1<<22;
+	#[benchmark(pov_mode = Measured)]
+	fn measured_storage_value_read_linear_size(l: Linear<0, { 1 << 22 }>) {
		let v: sp_runtime::BoundedVec = alloc::vec![0u8; l as usize].try_into().unwrap();
		LargeValue::<T>::put(&v);
-	}: {
-		assert!(LargeValue::<T>::get().is_some());
+		#[block]
+		{
+			assert!(LargeValue::<T>::get().is_some());
+		}
	}

-	#[pov_mode = MaxEncodedLen]
-	mel_storage_value_read_linear_size {
-		let l in 0 .. 1<<22;
+	#[benchmark(pov_mode = MaxEncodedLen)]
+	fn mel_storage_value_read_linear_size(l: Linear<0, { 1 << 22 }>) {
		let v: sp_runtime::BoundedVec = alloc::vec![0u8; l as usize].try_into().unwrap();
		LargeValue::<T>::put(&v);
-	}: {
-		assert!(LargeValue::<T>::get().is_some());
+		#[block]
+		{
+			assert!(LargeValue::<T>::get().is_some());
+		}
	}

-	#[pov_mode = Measured]
-	measured_storage_double_value_read_linear_size {
-		let l in 0 .. 1<<22;
+	#[benchmark(pov_mode = Measured)]
+	fn measured_storage_double_value_read_linear_size(l: Linear<0, { 1 << 22 }>) {
		let v: sp_runtime::BoundedVec = alloc::vec![0u8; l as usize].try_into().unwrap();
		LargeValue::<T>::put(&v);
		LargeValue2::<T>::put(&v);
-	}: {
-		assert!(LargeValue::<T>::get().is_some());
-		assert!(LargeValue2::<T>::get().is_some());
+		#[block]
+		{
+			assert!(LargeValue::<T>::get().is_some());
+			assert!(LargeValue2::<T>::get().is_some());
+		}
	}

-	#[pov_mode = MaxEncodedLen]
-	mel_storage_double_value_read_linear_size {
-		let l in 0 .. 1<<22;
+	#[benchmark(pov_mode = MaxEncodedLen)]
+	fn mel_storage_double_value_read_linear_size(l: Linear<0, { 1 << 22 }>) {
		let v: sp_runtime::BoundedVec = alloc::vec![0u8; l as usize].try_into().unwrap();
		LargeValue::<T>::put(&v);
		LargeValue2::<T>::put(&v);
-	}: {
-		assert!(LargeValue::<T>::get().is_some());
-		assert!(LargeValue2::<T>::get().is_some());
+		#[block]
+		{
+			assert!(LargeValue::<T>::get().is_some());
+			assert!(LargeValue2::<T>::get().is_some());
+		}
	}

-	#[pov_mode = MaxEncodedLen {
+	#[benchmark(pov_mode = MaxEncodedLen {
		Pov::LargeValue2: Measured
-	}]
-	mel_mixed_storage_double_value_read_linear_size {
-		let l in 0 .. 1<<22;
+	})]
+	fn mel_mixed_storage_double_value_read_linear_size(l: Linear<0, { 1 << 22 }>) {
		let v: sp_runtime::BoundedVec = alloc::vec![0u8; l as usize].try_into().unwrap();
		LargeValue::<T>::put(&v);
		LargeValue2::<T>::put(&v);
-	}: {
-		assert!(LargeValue::<T>::get().is_some());
-		assert!(LargeValue2::<T>::get().is_some());
+		#[block]
+		{
+			assert!(LargeValue::<T>::get().is_some());
+			assert!(LargeValue2::<T>::get().is_some());
+		}
	}

-	#[pov_mode = Measured {
+	#[benchmark(pov_mode = Measured {
		Pov::LargeValue2: MaxEncodedLen
-	}]
-	measured_mixed_storage_double_value_read_linear_size {
-		let l in 0 .. 1<<22;
+	})]
+	fn measured_mixed_storage_double_value_read_linear_size(l: Linear<0, { 1 << 22 }>) {
		let v: sp_runtime::BoundedVec = alloc::vec![0u8; l as usize].try_into().unwrap();
		LargeValue::<T>::put(&v);
		LargeValue2::<T>::put(&v);
-	}: {
-		assert!(LargeValue::<T>::get().is_some());
-		assert!(LargeValue2::<T>::get().is_some());
+		#[block]
+		{
+			assert!(LargeValue::<T>::get().is_some());
+			assert!(LargeValue2::<T>::get().is_some());
+		}
	}

-	#[pov_mode = Measured]
-	storage_map_unbounded_both_measured_read {
-		let i in 0 .. 1000;
-
+	#[benchmark(pov_mode = Measured)]
+	fn storage_map_unbounded_both_measured_read(i: Linear<0, 1000>) {
		UnboundedMap::<T>::insert(i, alloc::vec![0; i as usize]);
		UnboundedMap2::<T>::insert(i, alloc::vec![0; i as usize]);
-	}: {
-		assert!(UnboundedMap::<T>::get(i).is_some());
-		assert!(UnboundedMap2::<T>::get(i).is_some());
+		#[block]
+		{
+			assert!(UnboundedMap::<T>::get(i).is_some());
+			assert!(UnboundedMap2::<T>::get(i).is_some());
+		}
	}

-	#[pov_mode = MaxEncodedLen {
+	#[benchmark(pov_mode = MaxEncodedLen {
		Pov::UnboundedMap: Measured
-	}]
-	storage_map_partial_unbounded_read {
-		let i in 0 .. 1000;
-
+	})]
+	fn storage_map_partial_unbounded_read(i: Linear<0, 1000>) {
		Map1M::<T>::insert(i, 0);
		UnboundedMap::<T>::insert(i, alloc::vec![0; i as usize]);
-	}: {
-		assert!(Map1M::<T>::get(i).is_some());
-		assert!(UnboundedMap::<T>::get(i).is_some());
+		#[block]
+		{
+			assert!(Map1M::<T>::get(i).is_some());
+			assert!(UnboundedMap::<T>::get(i).is_some());
+		}
	}

-	#[pov_mode = MaxEncodedLen {
+	#[benchmark(pov_mode = MaxEncodedLen {
		Pov::UnboundedMap: Ignored
-	}]
-	storage_map_partial_unbounded_ignored_read {
-		let i in 0 .. 1000;
-
+	})]
+	fn storage_map_partial_unbounded_ignored_read(i: Linear<0, 1000>) {
		Map1M::<T>::insert(i, 0);
		UnboundedMap::<T>::insert(i, alloc::vec![0; i as usize]);
-	}: {
-		assert!(Map1M::<T>::get(i).is_some());
-		assert!(UnboundedMap::<T>::get(i).is_some());
+		#[block]
+		{
+			assert!(Map1M::<T>::get(i).is_some());
+			assert!(UnboundedMap::<T>::get(i).is_some());
+		}
	}

	// Emitting an event will not incur any PoV.
-	emit_event {
+	#[benchmark]
+	fn emit_event() {
		// Emit a single event.
-		let call = Call::<T>::emit_event { };
-	}: { call.dispatch_bypass_filter(RawOrigin::Root.into()).unwrap(); }
-	verify {
+		let call = Call::<T>::emit_event {};
+		#[block]
+		{
+			call.dispatch_bypass_filter(RawOrigin::Root.into()).unwrap();
+		}
		assert_eq!(System::<T>::events().len(), 1);
	}

	// A No-OP will not incur any PoV.
-	noop {
-		let call = Call::<T>::noop { };
-	}: {
-		call.dispatch_bypass_filter(RawOrigin::Root.into()).unwrap();
+	#[benchmark]
+	fn noop() {
+		let call = Call::<T>::noop {};
+		#[block]
+		{
+			call.dispatch_bypass_filter(RawOrigin::Root.into()).unwrap();
+		}
	}

-	storage_iteration {
+	#[benchmark]
+	fn storage_iteration() {
		for i in 0..65000 {
			UnboundedMapTwox::<T>::insert(i, alloc::vec![0; 64]);
		}
-	}: {
-		for (key, value) in UnboundedMapTwox::<T>::iter() {
-			unsafe {
-				core::ptr::read_volatile(&key);
-				core::ptr::read_volatile(value.as_ptr());
+
+		#[block]
+		{
+			for (key, value) in UnboundedMapTwox::<T>::iter() {
+				unsafe {
+					core::ptr::read_volatile(&key);
+					core::ptr::read_volatile(value.as_ptr());
+				}
			}
		}
	}

-	impl_benchmark_test_suite!(
-		Pallet,
-		mock::new_test_ext(),
-		mock::Test,
-	);
+	impl_benchmark_test_suite!(Pallet, super::mock::new_test_ext(), super::mock::Test,);
 }

 #[cfg(test)]
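The regenerated weights file below follows the standard template: a `WeightInfo` trait, a `SubstrateWeight<T>` implementation for runtimes (charging `T::DbWeight`), and a `()` implementation for tests (charging `RocksDbWeight`). Each function encodes both weight dimensions; a schematic example with values copied from the `storage_single_value_read` entry below (`T` stands for the surrounding `impl` parameter):

```rust
// `from_parts(ref_time, proof_size)`: 3_000_000 picoseconds of execution
// time and 1489 bytes of worst-case PoV, plus one DB read charged on top.
fn storage_single_value_read() -> Weight {
    Weight::from_parts(3_000_000, 1489)
        .saturating_add(T::DbWeight::get().reads(1_u64))
}
```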
diff --git a/substrate/frame/benchmarking/pov/src/weights.rs b/substrate/frame/benchmarking/pov/src/weights.rs
index d84ac88c98f0..c4fc03d1dd93 100644
--- a/substrate/frame/benchmarking/pov/src/weights.rs
+++ b/substrate/frame/benchmarking/pov/src/weights.rs
@@ -15,38 +15,36 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-//! Autogenerated weights for frame_benchmarking_pallet_pov
+//! Autogenerated weights for `frame_benchmarking_pallet_pov`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-04-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K`
-//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024
+//! HOSTNAME: `Olivers-MBP`, CPU: ``
+//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024`

 // Executed Command:
-// ./target/release/substrate
+// target/release/substrate-node
 // benchmark
 // pallet
-// --dev
 // --pallet
 // frame-benchmarking-pallet-pov
 // --extrinsic
 //
-// --steps
-// 50
-// --repeat
-// 20
-// --template=.maintain/frame-weight-template.hbs
-// --output=frame/benchmarking/pov/src/weights.rs
+// --output
+// substrate/frame/benchmarking/pov/src/weights.rs
+// --template
+// substrate/.maintain/frame-weight-template.hbs

 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
 #![allow(unused_imports)]
+#![allow(missing_docs)]

 use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
 use core::marker::PhantomData;

-/// Weight functions needed for frame_benchmarking_pallet_pov.
+/// Weight functions needed for `frame_benchmarking_pallet_pov`.
 pub trait WeightInfo {
	fn storage_single_value_read() -> Weight;
	fn storage_single_value_ignored_read() -> Weight;
@@ -80,361 +78,361 @@ pub trait WeightInfo {
	fn storage_iteration() -> Weight;
 }

-/// Weights for frame_benchmarking_pallet_pov using the Substrate node and recommended hardware.
+/// Weights for `frame_benchmarking_pallet_pov` using the Substrate node and recommended hardware.
 pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
-	/// Storage: Pov Value (r:1 w:0)
-	/// Proof: Pov Value (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: `Pov::Value` (r:1 w:0)
+	/// Proof: `Pov::Value` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
	fn storage_single_value_read() -> Weight {
		// Proof Size summary in bytes:
		//  Measured:  `136`
		//  Estimated: `1489`
-		// Minimum execution time: 1_706_000 picoseconds.
-		Weight::from_parts(1_788_000, 1489)
+		// Minimum execution time: 3_000_000 picoseconds.
+		Weight::from_parts(3_000_000, 1489)
			.saturating_add(T::DbWeight::get().reads(1_u64))
	}
-	/// Storage: Pov Value (r:1 w:0)
-	/// Proof: Pov Value (max_values: Some(1), max_size: Some(4), added: 499, mode: Ignored)
+	/// Storage: `Pov::Value` (r:1 w:0)
+	/// Proof: `Pov::Value` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Ignored`)
	fn storage_single_value_ignored_read() -> Weight {
		// Proof Size summary in bytes:
		//  Measured:  `136`
		//  Estimated: `0`
-		// Minimum execution time: 1_661_000 picoseconds.
-		Weight::from_parts(1_718_000, 0)
+		// Minimum execution time: 3_000_000 picoseconds.
+		Weight::from_parts(3_000_000, 0)
			.saturating_add(T::DbWeight::get().reads(1_u64))
	}
-	/// Storage: Pov Value (r:1 w:0)
-	/// Proof: Pov Value (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Pov Value2 (r:1 w:0)
-	/// Proof: Pov Value2 (max_values: Some(1), max_size: Some(4), added: 499, mode: Ignored)
+	/// Storage: `Pov::Value` (r:1 w:0)
+	/// Proof: `Pov::Value` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Pov::Value2` (r:1 w:0)
+	/// Proof: `Pov::Value2` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Ignored`)
	fn storage_single_value_ignored_some_read() -> Weight {
		// Proof Size summary in bytes:
		//  Measured:  `160`
		//  Estimated: `1489`
-		// Minimum execution time: 2_226_000 picoseconds.
-		Weight::from_parts(2_365_000, 1489)
+		// Minimum execution time: 4_000_000 picoseconds.
+		Weight::from_parts(4_000_000, 1489)
			.saturating_add(T::DbWeight::get().reads(2_u64))
	}
-	/// Storage: Pov Value (r:1 w:0)
-	/// Proof: Pov Value (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: `Pov::Value` (r:1 w:0)
+	/// Proof: `Pov::Value` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
	fn storage_single_value_read_twice() -> Weight {
		// Proof Size summary in bytes:
		//  Measured:  `136`
		//  Estimated: `1489`
-		// Minimum execution time: 1_785_000 picoseconds.
-		Weight::from_parts(1_980_000, 1489)
+		// Minimum execution time: 3_000_000 picoseconds.
+ Weight::from_parts(4_000_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Pov Value (r:0 w:1) - /// Proof: Pov Value (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Pov::Value` (r:0 w:1) + /// Proof: `Pov::Value` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn storage_single_value_write() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 254_000 picoseconds. - Weight::from_parts(326_000, 0) + // Minimum execution time: 0_000 picoseconds. + Weight::from_parts(1_000_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Pov Value (r:0 w:1) - /// Proof: Pov Value (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Pov::Value` (r:0 w:1) + /// Proof: `Pov::Value` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn storage_single_value_kill() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 239_000 picoseconds. - Weight::from_parts(277_000, 0) + // Minimum execution time: 0_000 picoseconds. + Weight::from_parts(1_000_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Pov Map1M (r:1 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: Measured) + /// Storage: `Pov::Map1M` (r:1 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `Measured`) fn storage_1m_map_read_one_value_two_additional_layers() -> Weight { // Proof Size summary in bytes: // Measured: `1275` // Estimated: `4740` - // Minimum execution time: 4_760_000 picoseconds. - Weight::from_parts(5_051_000, 4740) + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(7_000_000, 4740) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Pov Map1M (r:1 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: Measured) + /// Storage: `Pov::Map1M` (r:1 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `Measured`) fn storage_1m_map_read_one_value_three_additional_layers() -> Weight { // Proof Size summary in bytes: // Measured: `1544` // Estimated: `5009` - // Minimum execution time: 5_490_000 picoseconds. - Weight::from_parts(5_703_000, 5009) + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(8_000_000, 5009) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Pov Map1M (r:1 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: Measured) + /// Storage: `Pov::Map1M` (r:1 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `Measured`) fn storage_1m_map_read_one_value_four_additional_layers() -> Weight { // Proof Size summary in bytes: // Measured: `2044` // Estimated: `5509` - // Minimum execution time: 6_397_000 picoseconds. - Weight::from_parts(7_084_000, 5509) + // Minimum execution time: 9_000_000 picoseconds. 
+ Weight::from_parts(10_000_000, 5509) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Pov Map1M (r:100 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: MaxEncodedLen) - /// Storage: Pov Map16M (r:100 w:0) - /// Proof: Pov Map16M (max_values: Some(16000000), max_size: Some(36), added: 3006, mode: MaxEncodedLen) + /// Storage: `Pov::Map1M` (r:100 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Pov::Map16M` (r:100 w:0) + /// Proof: `Pov::Map16M` (`max_values`: Some(16000000), `max_size`: Some(36), added: 3006, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. /// The range of component `m` is `[0, 100]`. fn storage_map_read_per_component(n: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `515 + m * (188 ±0) + n * (188 ±0)` // Estimated: `990 + m * (2511 ±0) + n * (3006 ±0)` - // Minimum execution time: 181_481_000 picoseconds. - Weight::from_parts(129_275_141, 990) - // Standard Error: 13_049 - .saturating_add(Weight::from_parts(787_667, 0).saturating_mul(n.into())) - // Standard Error: 13_049 - .saturating_add(Weight::from_parts(830_378, 0).saturating_mul(m.into())) + // Minimum execution time: 342_000_000 picoseconds. + Weight::from_parts(179_688_624, 990) + // Standard Error: 26_526 + .saturating_add(Weight::from_parts(2_061_828, 0).saturating_mul(n.into())) + // Standard Error: 26_526 + .saturating_add(Weight::from_parts(1_825_923, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(Weight::from_parts(0, 2511).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 3006).saturating_mul(n.into())) } - /// Storage: Pov Map1M (r:100 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: Ignored) - /// Storage: Pov Map16M (r:100 w:0) - /// Proof: Pov Map16M (max_values: Some(16000000), max_size: Some(36), added: 3006, mode: MaxEncodedLen) + /// Storage: `Pov::Map1M` (r:100 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `Ignored`) + /// Storage: `Pov::Map16M` (r:100 w:0) + /// Proof: `Pov::Map16M` (`max_values`: Some(16000000), `max_size`: Some(36), added: 3006, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. /// The range of component `m` is `[0, 100]`. fn storage_map_read_per_component_one_ignored(n: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `515 + m * (188 ±0) + n * (188 ±0)` // Estimated: `1685 + m * (189 ±0) + n * (3006 ±0)` - // Minimum execution time: 181_925_000 picoseconds. - Weight::from_parts(134_416_814, 1685) - // Standard Error: 15_678 - .saturating_add(Weight::from_parts(827_168, 0).saturating_mul(n.into())) - // Standard Error: 15_678 - .saturating_add(Weight::from_parts(813_655, 0).saturating_mul(m.into())) + // Minimum execution time: 342_000_000 picoseconds. 
+ Weight::from_parts(204_945_396, 1685) + // Standard Error: 25_217 + .saturating_add(Weight::from_parts(1_827_513, 0).saturating_mul(n.into())) + // Standard Error: 25_217 + .saturating_add(Weight::from_parts(1_661_271, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(Weight::from_parts(0, 189).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 3006).saturating_mul(n.into())) } - /// Storage: Pov Map1M (r:1 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: MaxEncodedLen) + /// Storage: `Pov::Map1M` (r:1 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. fn storage_1m_map_one_entry_repeated_read(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3501` - // Minimum execution time: 20_000 picoseconds. - Weight::from_parts(2_006_399, 3501) - // Standard Error: 808 - .saturating_add(Weight::from_parts(263_609, 0).saturating_mul(n.into())) + // Minimum execution time: 0_000 picoseconds. + Weight::from_parts(3_387_064, 3501) + // Standard Error: 1_445 + .saturating_add(Weight::from_parts(1_143_678, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Pov Map1M (r:100 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: MaxEncodedLen) + /// Storage: `Pov::Map1M` (r:100 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. fn storage_1m_map_multiple_entry_repeated_read(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `147 + n * (40 ±0)` // Estimated: `990 + n * (2511 ±0)` - // Minimum execution time: 21_000 picoseconds. - Weight::from_parts(3_940_044, 990) - // Standard Error: 4_906 - .saturating_add(Weight::from_parts(3_454_882, 0).saturating_mul(n.into())) + // Minimum execution time: 0_000 picoseconds. + Weight::from_parts(1_323_684, 990) + // Standard Error: 10_546 + .saturating_add(Weight::from_parts(13_101_864, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2511).saturating_mul(n.into())) } - /// Storage: Pov DoubleMap1M (r:1024 w:0) - /// Proof: Pov DoubleMap1M (max_values: Some(1000000), max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Pov::DoubleMap1M` (r:1024 w:0) + /// Proof: `Pov::DoubleMap1M` (`max_values`: Some(1000000), `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1024]`. fn storage_1m_double_map_read_per_component(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `21938 + n * (57 ±0)` // Estimated: `990 + n * (2543 ±0)` - // Minimum execution time: 28_000 picoseconds. - Weight::from_parts(20_674_869, 990) - // Standard Error: 3_035 - .saturating_add(Weight::from_parts(1_995_730, 0).saturating_mul(n.into())) + // Minimum execution time: 0_000 picoseconds. 
+ Weight::from_parts(39_703_963, 990) + // Standard Error: 10_589 + .saturating_add(Weight::from_parts(3_718_040, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2543).saturating_mul(n.into())) } - /// Storage: Pov BoundedValue (r:1 w:0) - /// Proof: Pov BoundedValue (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) + /// Storage: `Pov::BoundedValue` (r:1 w:0) + /// Proof: `Pov::BoundedValue` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) fn storage_value_bounded_read() -> Weight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1518` - // Minimum execution time: 1_091_000 picoseconds. - Weight::from_parts(1_181_000, 1518) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_000_000, 1518) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Pov UnboundedValue (r:1 w:0) - /// Proof Skipped: Pov UnboundedValue (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Pov::UnboundedValue` (r:1 w:0) + /// Proof: `Pov::UnboundedValue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn storage_value_unbounded_read() -> Weight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1594` - // Minimum execution time: 1_079_000 picoseconds. - Weight::from_parts(1_176_000, 1594) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_000_000, 1594) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Pov UnboundedValue (r:1 w:0) - /// Proof Skipped: Pov UnboundedValue (max_values: Some(1), max_size: None, mode: Ignored) + /// Storage: `Pov::UnboundedValue` (r:1 w:0) + /// Proof: `Pov::UnboundedValue` (`max_values`: Some(1), `max_size`: None, mode: `Ignored`) fn storage_value_unbounded_ignored_read() -> Weight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `0` - // Minimum execution time: 1_101_000 picoseconds. - Weight::from_parts(1_160_000, 0) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_000_000, 0) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Pov UnboundedValue (r:1 w:0) - /// Proof Skipped: Pov UnboundedValue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Pov BoundedValue (r:1 w:0) - /// Proof: Pov BoundedValue (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) + /// Storage: `Pov::UnboundedValue` (r:1 w:0) + /// Proof: `Pov::UnboundedValue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Pov::BoundedValue` (r:1 w:0) + /// Proof: `Pov::BoundedValue` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) fn storage_value_bounded_and_unbounded_read() -> Weight { // Proof Size summary in bytes: // Measured: `147` // Estimated: `1632` - // Minimum execution time: 2_143_000 picoseconds. - Weight::from_parts(2_280_000, 1632) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(5_000_000, 1632) .saturating_add(T::DbWeight::get().reads(2_u64)) } - /// Storage: Pov LargeValue (r:1 w:0) - /// Proof: Pov LargeValue (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: Measured) + /// Storage: `Pov::LargeValue` (r:1 w:0) + /// Proof: `Pov::LargeValue` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `Measured`) /// The range of component `l` is `[0, 4194304]`. 
fn measured_storage_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `142 + l * (1 ±0)` // Estimated: `1626 + l * (1 ±0)` - // Minimum execution time: 1_665_000 picoseconds. - Weight::from_parts(1_725_000, 1626) - // Standard Error: 3 - .saturating_add(Weight::from_parts(376, 0).saturating_mul(l.into())) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(3_000_000, 1626) + // Standard Error: 1 + .saturating_add(Weight::from_parts(393, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(l.into())) } - /// Storage: Pov LargeValue (r:1 w:0) - /// Proof: Pov LargeValue (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: MaxEncodedLen) + /// Storage: `Pov::LargeValue` (r:1 w:0) + /// Proof: `Pov::LargeValue` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 4194304]`. fn mel_storage_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `142 + l * (1 ±0)` // Estimated: `4195793` - // Minimum execution time: 1_640_000 picoseconds. - Weight::from_parts(1_724_000, 4195793) - // Standard Error: 4 - .saturating_add(Weight::from_parts(395, 0).saturating_mul(l.into())) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(3_000_000, 4195793) + // Standard Error: 1 + .saturating_add(Weight::from_parts(394, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Pov LargeValue (r:1 w:0) - /// Proof: Pov LargeValue (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: Measured) - /// Storage: Pov LargeValue2 (r:1 w:0) - /// Proof: Pov LargeValue2 (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: Measured) + /// Storage: `Pov::LargeValue` (r:1 w:0) + /// Proof: `Pov::LargeValue` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `Measured`) + /// Storage: `Pov::LargeValue2` (r:1 w:0) + /// Proof: `Pov::LargeValue2` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `Measured`) /// The range of component `l` is `[0, 4194304]`. fn measured_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `171 + l * (2 ±0)` // Estimated: `1655 + l * (2 ±0)` - // Minimum execution time: 2_263_000 picoseconds. - Weight::from_parts(2_358_000, 1655) - // Standard Error: 8 - .saturating_add(Weight::from_parts(737, 0).saturating_mul(l.into())) + // Minimum execution time: 4_000_000 picoseconds. 
+ Weight::from_parts(4_000_000, 1655) + // Standard Error: 2 + .saturating_add(Weight::from_parts(655, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 2).saturating_mul(l.into())) } - /// Storage: Pov LargeValue (r:1 w:0) - /// Proof: Pov LargeValue (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: MaxEncodedLen) - /// Storage: Pov LargeValue2 (r:1 w:0) - /// Proof: Pov LargeValue2 (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: MaxEncodedLen) + /// Storage: `Pov::LargeValue` (r:1 w:0) + /// Proof: `Pov::LargeValue` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `MaxEncodedLen`) + /// Storage: `Pov::LargeValue2` (r:1 w:0) + /// Proof: `Pov::LargeValue2` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 4194304]`. fn mel_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `171 + l * (2 ±0)` // Estimated: `4195793` - // Minimum execution time: 2_161_000 picoseconds. - Weight::from_parts(2_233_000, 4195793) - // Standard Error: 5 - .saturating_add(Weight::from_parts(639, 0).saturating_mul(l.into())) + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 4195793) + // Standard Error: 2 + .saturating_add(Weight::from_parts(660, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) } - /// Storage: Pov LargeValue (r:1 w:0) - /// Proof: Pov LargeValue (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: MaxEncodedLen) - /// Storage: Pov LargeValue2 (r:1 w:0) - /// Proof: Pov LargeValue2 (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: Measured) + /// Storage: `Pov::LargeValue` (r:1 w:0) + /// Proof: `Pov::LargeValue` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `MaxEncodedLen`) + /// Storage: `Pov::LargeValue2` (r:1 w:0) + /// Proof: `Pov::LargeValue2` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `Measured`) /// The range of component `l` is `[0, 4194304]`. fn mel_mixed_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `171 + l * (2 ±0)` // Estimated: `4195793 + l * (2 ±0)` - // Minimum execution time: 2_149_000 picoseconds. - Weight::from_parts(2_256_000, 4195793) - // Standard Error: 6 - .saturating_add(Weight::from_parts(677, 0).saturating_mul(l.into())) + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 4195793) + // Standard Error: 4 + .saturating_add(Weight::from_parts(691, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 2).saturating_mul(l.into())) } - /// Storage: Pov LargeValue (r:1 w:0) - /// Proof: Pov LargeValue (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: Measured) - /// Storage: Pov LargeValue2 (r:1 w:0) - /// Proof: Pov LargeValue2 (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: MaxEncodedLen) + /// Storage: `Pov::LargeValue` (r:1 w:0) + /// Proof: `Pov::LargeValue` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `Measured`) + /// Storage: `Pov::LargeValue2` (r:1 w:0) + /// Proof: `Pov::LargeValue2` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 4194304]`. 
fn measured_mixed_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `171 + l * (2 ±0)` // Estimated: `4195793 + l * (2 ±0)` - // Minimum execution time: 2_254_000 picoseconds. - Weight::from_parts(2_319_000, 4195793) - // Standard Error: 5 - .saturating_add(Weight::from_parts(664, 0).saturating_mul(l.into())) + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 4195793) + // Standard Error: 4 + .saturating_add(Weight::from_parts(691, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 2).saturating_mul(l.into())) } - /// Storage: Pov UnboundedMap (r:1 w:0) - /// Proof Skipped: Pov UnboundedMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Pov UnboundedMap2 (r:1 w:0) - /// Proof Skipped: Pov UnboundedMap2 (max_values: None, max_size: None, mode: Measured) + /// Storage: `Pov::UnboundedMap` (r:1 w:0) + /// Proof: `Pov::UnboundedMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Pov::UnboundedMap2` (r:1 w:0) + /// Proof: `Pov::UnboundedMap2` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn storage_map_unbounded_both_measured_read(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `229 + i * (8 ±0)` // Estimated: `3693 + i * (8 ±0)` - // Minimum execution time: 3_071_000 picoseconds. - Weight::from_parts(3_487_712, 3693) - // Standard Error: 26 - .saturating_add(Weight::from_parts(748, 0).saturating_mul(i.into())) + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(7_274_226, 3693) + // Standard Error: 280 + .saturating_add(Weight::from_parts(3_282, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(i.into())) } - /// Storage: Pov Map1M (r:1 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: MaxEncodedLen) - /// Storage: Pov UnboundedMap (r:1 w:0) - /// Proof Skipped: Pov UnboundedMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `Pov::Map1M` (r:1 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Pov::UnboundedMap` (r:1 w:0) + /// Proof: `Pov::UnboundedMap` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn storage_map_partial_unbounded_read(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `228 + i * (4 ±0)` // Estimated: `3692 + i * (4 ±0)` - // Minimum execution time: 3_150_000 picoseconds. - Weight::from_parts(3_582_963, 3692) - // Standard Error: 18 - .saturating_add(Weight::from_parts(380, 0).saturating_mul(i.into())) + // Minimum execution time: 7_000_000 picoseconds. 
+ Weight::from_parts(7_507_333, 3692) + // Standard Error: 64 + .saturating_add(Weight::from_parts(982, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 4).saturating_mul(i.into())) } - /// Storage: Pov Map1M (r:1 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: MaxEncodedLen) - /// Storage: Pov UnboundedMap (r:1 w:0) - /// Proof Skipped: Pov UnboundedMap (max_values: None, max_size: None, mode: Ignored) + /// Storage: `Pov::Map1M` (r:1 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Pov::UnboundedMap` (r:1 w:0) + /// Proof: `Pov::UnboundedMap` (`max_values`: None, `max_size`: None, mode: `Ignored`) /// The range of component `i` is `[0, 1000]`. fn storage_map_partial_unbounded_ignored_read(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `228 + i * (4 ±0)` // Estimated: `3501 + i * (4 ±0)` - // Minimum execution time: 3_092_000 picoseconds. - Weight::from_parts(3_595_328, 3501) - // Standard Error: 20 - .saturating_add(Weight::from_parts(243, 0).saturating_mul(i.into())) + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(7_285_011, 3501) + // Standard Error: 80 + .saturating_add(Weight::from_parts(1_395, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 4).saturating_mul(i.into())) } @@ -442,382 +440,382 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_705_000 picoseconds. - Weight::from_parts(1_818_000, 0) + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(5_000_000, 0) } fn noop() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 533_000 picoseconds. - Weight::from_parts(587_000, 0) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_000_000, 0) } - /// Storage: Pov UnboundedMapTwox (r:65001 w:0) - /// Proof Skipped: Pov UnboundedMapTwox (max_values: None, max_size: None, mode: Measured) + /// Storage: `Pov::UnboundedMapTwox` (r:65001 w:0) + /// Proof: `Pov::UnboundedMapTwox` (`max_values`: None, `max_size`: None, mode: `Measured`) fn storage_iteration() -> Weight { // Proof Size summary in bytes: // Measured: `17985289` // Estimated: `178863754` - // Minimum execution time: 118_753_057_000 picoseconds. - Weight::from_parts(121_396_503_000, 178863754) + // Minimum execution time: 218_275_000_000 picoseconds. + Weight::from_parts(222_603_000_000, 178863754) .saturating_add(T::DbWeight::get().reads(65001_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Pov Value (r:1 w:0) - /// Proof: Pov Value (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Pov::Value` (r:1 w:0) + /// Proof: `Pov::Value` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn storage_single_value_read() -> Weight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `1489` - // Minimum execution time: 1_706_000 picoseconds. - Weight::from_parts(1_788_000, 1489) + // Minimum execution time: 3_000_000 picoseconds. 
+ Weight::from_parts(3_000_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Pov Value (r:1 w:0) - /// Proof: Pov Value (max_values: Some(1), max_size: Some(4), added: 499, mode: Ignored) + /// Storage: `Pov::Value` (r:1 w:0) + /// Proof: `Pov::Value` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Ignored`) fn storage_single_value_ignored_read() -> Weight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `0` - // Minimum execution time: 1_661_000 picoseconds. - Weight::from_parts(1_718_000, 0) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(3_000_000, 0) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Pov Value (r:1 w:0) - /// Proof: Pov Value (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Pov Value2 (r:1 w:0) - /// Proof: Pov Value2 (max_values: Some(1), max_size: Some(4), added: 499, mode: Ignored) + /// Storage: `Pov::Value` (r:1 w:0) + /// Proof: `Pov::Value` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Pov::Value2` (r:1 w:0) + /// Proof: `Pov::Value2` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Ignored`) fn storage_single_value_ignored_some_read() -> Weight { // Proof Size summary in bytes: // Measured: `160` // Estimated: `1489` - // Minimum execution time: 2_226_000 picoseconds. - Weight::from_parts(2_365_000, 1489) + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 1489) .saturating_add(RocksDbWeight::get().reads(2_u64)) } - /// Storage: Pov Value (r:1 w:0) - /// Proof: Pov Value (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Pov::Value` (r:1 w:0) + /// Proof: `Pov::Value` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn storage_single_value_read_twice() -> Weight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `1489` - // Minimum execution time: 1_785_000 picoseconds. - Weight::from_parts(1_980_000, 1489) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(4_000_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Pov Value (r:0 w:1) - /// Proof: Pov Value (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Pov::Value` (r:0 w:1) + /// Proof: `Pov::Value` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn storage_single_value_write() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 254_000 picoseconds. - Weight::from_parts(326_000, 0) + // Minimum execution time: 0_000 picoseconds. + Weight::from_parts(1_000_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Pov Value (r:0 w:1) - /// Proof: Pov Value (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Pov::Value` (r:0 w:1) + /// Proof: `Pov::Value` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn storage_single_value_kill() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 239_000 picoseconds. - Weight::from_parts(277_000, 0) + // Minimum execution time: 0_000 picoseconds. 
+ Weight::from_parts(1_000_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Pov Map1M (r:1 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: Measured) + /// Storage: `Pov::Map1M` (r:1 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `Measured`) fn storage_1m_map_read_one_value_two_additional_layers() -> Weight { // Proof Size summary in bytes: // Measured: `1275` // Estimated: `4740` - // Minimum execution time: 4_760_000 picoseconds. - Weight::from_parts(5_051_000, 4740) + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(7_000_000, 4740) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Pov Map1M (r:1 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: Measured) + /// Storage: `Pov::Map1M` (r:1 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `Measured`) fn storage_1m_map_read_one_value_three_additional_layers() -> Weight { // Proof Size summary in bytes: // Measured: `1544` // Estimated: `5009` - // Minimum execution time: 5_490_000 picoseconds. - Weight::from_parts(5_703_000, 5009) + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(8_000_000, 5009) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Pov Map1M (r:1 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: Measured) + /// Storage: `Pov::Map1M` (r:1 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `Measured`) fn storage_1m_map_read_one_value_four_additional_layers() -> Weight { // Proof Size summary in bytes: // Measured: `2044` // Estimated: `5509` - // Minimum execution time: 6_397_000 picoseconds. - Weight::from_parts(7_084_000, 5509) + // Minimum execution time: 9_000_000 picoseconds. + Weight::from_parts(10_000_000, 5509) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Pov Map1M (r:100 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: MaxEncodedLen) - /// Storage: Pov Map16M (r:100 w:0) - /// Proof: Pov Map16M (max_values: Some(16000000), max_size: Some(36), added: 3006, mode: MaxEncodedLen) + /// Storage: `Pov::Map1M` (r:100 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Pov::Map16M` (r:100 w:0) + /// Proof: `Pov::Map16M` (`max_values`: Some(16000000), `max_size`: Some(36), added: 3006, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. /// The range of component `m` is `[0, 100]`. fn storage_map_read_per_component(n: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `515 + m * (188 ±0) + n * (188 ±0)` // Estimated: `990 + m * (2511 ±0) + n * (3006 ±0)` - // Minimum execution time: 181_481_000 picoseconds. - Weight::from_parts(129_275_141, 990) - // Standard Error: 13_049 - .saturating_add(Weight::from_parts(787_667, 0).saturating_mul(n.into())) - // Standard Error: 13_049 - .saturating_add(Weight::from_parts(830_378, 0).saturating_mul(m.into())) + // Minimum execution time: 342_000_000 picoseconds. 
+ Weight::from_parts(179_688_624, 990) + // Standard Error: 26_526 + .saturating_add(Weight::from_parts(2_061_828, 0).saturating_mul(n.into())) + // Standard Error: 26_526 + .saturating_add(Weight::from_parts(1_825_923, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(Weight::from_parts(0, 2511).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 3006).saturating_mul(n.into())) } - /// Storage: Pov Map1M (r:100 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: Ignored) - /// Storage: Pov Map16M (r:100 w:0) - /// Proof: Pov Map16M (max_values: Some(16000000), max_size: Some(36), added: 3006, mode: MaxEncodedLen) + /// Storage: `Pov::Map1M` (r:100 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `Ignored`) + /// Storage: `Pov::Map16M` (r:100 w:0) + /// Proof: `Pov::Map16M` (`max_values`: Some(16000000), `max_size`: Some(36), added: 3006, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. /// The range of component `m` is `[0, 100]`. fn storage_map_read_per_component_one_ignored(n: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `515 + m * (188 ±0) + n * (188 ±0)` // Estimated: `1685 + m * (189 ±0) + n * (3006 ±0)` - // Minimum execution time: 181_925_000 picoseconds. - Weight::from_parts(134_416_814, 1685) - // Standard Error: 15_678 - .saturating_add(Weight::from_parts(827_168, 0).saturating_mul(n.into())) - // Standard Error: 15_678 - .saturating_add(Weight::from_parts(813_655, 0).saturating_mul(m.into())) + // Minimum execution time: 342_000_000 picoseconds. + Weight::from_parts(204_945_396, 1685) + // Standard Error: 25_217 + .saturating_add(Weight::from_parts(1_827_513, 0).saturating_mul(n.into())) + // Standard Error: 25_217 + .saturating_add(Weight::from_parts(1_661_271, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(Weight::from_parts(0, 189).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 3006).saturating_mul(n.into())) } - /// Storage: Pov Map1M (r:1 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: MaxEncodedLen) + /// Storage: `Pov::Map1M` (r:1 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. fn storage_1m_map_one_entry_repeated_read(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3501` - // Minimum execution time: 20_000 picoseconds. - Weight::from_parts(2_006_399, 3501) - // Standard Error: 808 - .saturating_add(Weight::from_parts(263_609, 0).saturating_mul(n.into())) + // Minimum execution time: 0_000 picoseconds. 
+ Weight::from_parts(3_387_064, 3501) + // Standard Error: 1_445 + .saturating_add(Weight::from_parts(1_143_678, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Pov Map1M (r:100 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: MaxEncodedLen) + /// Storage: `Pov::Map1M` (r:100 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. fn storage_1m_map_multiple_entry_repeated_read(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `147 + n * (40 ±0)` // Estimated: `990 + n * (2511 ±0)` - // Minimum execution time: 21_000 picoseconds. - Weight::from_parts(3_940_044, 990) - // Standard Error: 4_906 - .saturating_add(Weight::from_parts(3_454_882, 0).saturating_mul(n.into())) + // Minimum execution time: 0_000 picoseconds. + Weight::from_parts(1_323_684, 990) + // Standard Error: 10_546 + .saturating_add(Weight::from_parts(13_101_864, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2511).saturating_mul(n.into())) } - /// Storage: Pov DoubleMap1M (r:1024 w:0) - /// Proof: Pov DoubleMap1M (max_values: Some(1000000), max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Pov::DoubleMap1M` (r:1024 w:0) + /// Proof: `Pov::DoubleMap1M` (`max_values`: Some(1000000), `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1024]`. fn storage_1m_double_map_read_per_component(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `21938 + n * (57 ±0)` // Estimated: `990 + n * (2543 ±0)` - // Minimum execution time: 28_000 picoseconds. - Weight::from_parts(20_674_869, 990) - // Standard Error: 3_035 - .saturating_add(Weight::from_parts(1_995_730, 0).saturating_mul(n.into())) + // Minimum execution time: 0_000 picoseconds. + Weight::from_parts(39_703_963, 990) + // Standard Error: 10_589 + .saturating_add(Weight::from_parts(3_718_040, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2543).saturating_mul(n.into())) } - /// Storage: Pov BoundedValue (r:1 w:0) - /// Proof: Pov BoundedValue (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) + /// Storage: `Pov::BoundedValue` (r:1 w:0) + /// Proof: `Pov::BoundedValue` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) fn storage_value_bounded_read() -> Weight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1518` - // Minimum execution time: 1_091_000 picoseconds. - Weight::from_parts(1_181_000, 1518) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_000_000, 1518) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Pov UnboundedValue (r:1 w:0) - /// Proof Skipped: Pov UnboundedValue (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Pov::UnboundedValue` (r:1 w:0) + /// Proof: `Pov::UnboundedValue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn storage_value_unbounded_read() -> Weight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1594` - // Minimum execution time: 1_079_000 picoseconds. - Weight::from_parts(1_176_000, 1594) + // Minimum execution time: 2_000_000 picoseconds. 
+ Weight::from_parts(2_000_000, 1594) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Pov UnboundedValue (r:1 w:0) - /// Proof Skipped: Pov UnboundedValue (max_values: Some(1), max_size: None, mode: Ignored) + /// Storage: `Pov::UnboundedValue` (r:1 w:0) + /// Proof: `Pov::UnboundedValue` (`max_values`: Some(1), `max_size`: None, mode: `Ignored`) fn storage_value_unbounded_ignored_read() -> Weight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `0` - // Minimum execution time: 1_101_000 picoseconds. - Weight::from_parts(1_160_000, 0) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_000_000, 0) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Pov UnboundedValue (r:1 w:0) - /// Proof Skipped: Pov UnboundedValue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Pov BoundedValue (r:1 w:0) - /// Proof: Pov BoundedValue (max_values: Some(1), max_size: Some(33), added: 528, mode: MaxEncodedLen) + /// Storage: `Pov::UnboundedValue` (r:1 w:0) + /// Proof: `Pov::UnboundedValue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Pov::BoundedValue` (r:1 w:0) + /// Proof: `Pov::BoundedValue` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) fn storage_value_bounded_and_unbounded_read() -> Weight { // Proof Size summary in bytes: // Measured: `147` // Estimated: `1632` - // Minimum execution time: 2_143_000 picoseconds. - Weight::from_parts(2_280_000, 1632) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(5_000_000, 1632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } - /// Storage: Pov LargeValue (r:1 w:0) - /// Proof: Pov LargeValue (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: Measured) + /// Storage: `Pov::LargeValue` (r:1 w:0) + /// Proof: `Pov::LargeValue` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `Measured`) /// The range of component `l` is `[0, 4194304]`. fn measured_storage_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `142 + l * (1 ±0)` // Estimated: `1626 + l * (1 ±0)` - // Minimum execution time: 1_665_000 picoseconds. - Weight::from_parts(1_725_000, 1626) - // Standard Error: 3 - .saturating_add(Weight::from_parts(376, 0).saturating_mul(l.into())) + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(3_000_000, 1626) + // Standard Error: 1 + .saturating_add(Weight::from_parts(393, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(l.into())) } - /// Storage: Pov LargeValue (r:1 w:0) - /// Proof: Pov LargeValue (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: MaxEncodedLen) + /// Storage: `Pov::LargeValue` (r:1 w:0) + /// Proof: `Pov::LargeValue` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 4194304]`. fn mel_storage_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `142 + l * (1 ±0)` // Estimated: `4195793` - // Minimum execution time: 1_640_000 picoseconds. - Weight::from_parts(1_724_000, 4195793) - // Standard Error: 4 - .saturating_add(Weight::from_parts(395, 0).saturating_mul(l.into())) + // Minimum execution time: 3_000_000 picoseconds. 
+ Weight::from_parts(3_000_000, 4195793) + // Standard Error: 1 + .saturating_add(Weight::from_parts(394, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Pov LargeValue (r:1 w:0) - /// Proof: Pov LargeValue (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: Measured) - /// Storage: Pov LargeValue2 (r:1 w:0) - /// Proof: Pov LargeValue2 (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: Measured) + /// Storage: `Pov::LargeValue` (r:1 w:0) + /// Proof: `Pov::LargeValue` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `Measured`) + /// Storage: `Pov::LargeValue2` (r:1 w:0) + /// Proof: `Pov::LargeValue2` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `Measured`) /// The range of component `l` is `[0, 4194304]`. fn measured_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `171 + l * (2 ±0)` // Estimated: `1655 + l * (2 ±0)` - // Minimum execution time: 2_263_000 picoseconds. - Weight::from_parts(2_358_000, 1655) - // Standard Error: 8 - .saturating_add(Weight::from_parts(737, 0).saturating_mul(l.into())) + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 1655) + // Standard Error: 2 + .saturating_add(Weight::from_parts(655, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 2).saturating_mul(l.into())) } - /// Storage: Pov LargeValue (r:1 w:0) - /// Proof: Pov LargeValue (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: MaxEncodedLen) - /// Storage: Pov LargeValue2 (r:1 w:0) - /// Proof: Pov LargeValue2 (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: MaxEncodedLen) + /// Storage: `Pov::LargeValue` (r:1 w:0) + /// Proof: `Pov::LargeValue` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `MaxEncodedLen`) + /// Storage: `Pov::LargeValue2` (r:1 w:0) + /// Proof: `Pov::LargeValue2` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 4194304]`. fn mel_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `171 + l * (2 ±0)` // Estimated: `4195793` - // Minimum execution time: 2_161_000 picoseconds. - Weight::from_parts(2_233_000, 4195793) - // Standard Error: 5 - .saturating_add(Weight::from_parts(639, 0).saturating_mul(l.into())) + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 4195793) + // Standard Error: 2 + .saturating_add(Weight::from_parts(660, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) } - /// Storage: Pov LargeValue (r:1 w:0) - /// Proof: Pov LargeValue (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: MaxEncodedLen) - /// Storage: Pov LargeValue2 (r:1 w:0) - /// Proof: Pov LargeValue2 (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: Measured) + /// Storage: `Pov::LargeValue` (r:1 w:0) + /// Proof: `Pov::LargeValue` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `MaxEncodedLen`) + /// Storage: `Pov::LargeValue2` (r:1 w:0) + /// Proof: `Pov::LargeValue2` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `Measured`) /// The range of component `l` is `[0, 4194304]`. 
fn mel_mixed_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `171 + l * (2 ±0)` // Estimated: `4195793 + l * (2 ±0)` - // Minimum execution time: 2_149_000 picoseconds. - Weight::from_parts(2_256_000, 4195793) - // Standard Error: 6 - .saturating_add(Weight::from_parts(677, 0).saturating_mul(l.into())) + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 4195793) + // Standard Error: 4 + .saturating_add(Weight::from_parts(691, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 2).saturating_mul(l.into())) } - /// Storage: Pov LargeValue (r:1 w:0) - /// Proof: Pov LargeValue (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: Measured) - /// Storage: Pov LargeValue2 (r:1 w:0) - /// Proof: Pov LargeValue2 (max_values: Some(1), max_size: Some(4194308), added: 4194803, mode: MaxEncodedLen) + /// Storage: `Pov::LargeValue` (r:1 w:0) + /// Proof: `Pov::LargeValue` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `Measured`) + /// Storage: `Pov::LargeValue2` (r:1 w:0) + /// Proof: `Pov::LargeValue2` (`max_values`: Some(1), `max_size`: Some(4194308), added: 4194803, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 4194304]`. fn measured_mixed_storage_double_value_read_linear_size(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `171 + l * (2 ±0)` // Estimated: `4195793 + l * (2 ±0)` - // Minimum execution time: 2_254_000 picoseconds. - Weight::from_parts(2_319_000, 4195793) - // Standard Error: 5 - .saturating_add(Weight::from_parts(664, 0).saturating_mul(l.into())) + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 4195793) + // Standard Error: 4 + .saturating_add(Weight::from_parts(691, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 2).saturating_mul(l.into())) } - /// Storage: Pov UnboundedMap (r:1 w:0) - /// Proof Skipped: Pov UnboundedMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Pov UnboundedMap2 (r:1 w:0) - /// Proof Skipped: Pov UnboundedMap2 (max_values: None, max_size: None, mode: Measured) + /// Storage: `Pov::UnboundedMap` (r:1 w:0) + /// Proof: `Pov::UnboundedMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Pov::UnboundedMap2` (r:1 w:0) + /// Proof: `Pov::UnboundedMap2` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn storage_map_unbounded_both_measured_read(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `229 + i * (8 ±0)` // Estimated: `3693 + i * (8 ±0)` - // Minimum execution time: 3_071_000 picoseconds. - Weight::from_parts(3_487_712, 3693) - // Standard Error: 26 - .saturating_add(Weight::from_parts(748, 0).saturating_mul(i.into())) + // Minimum execution time: 6_000_000 picoseconds. 
+ Weight::from_parts(7_274_226, 3693) + // Standard Error: 280 + .saturating_add(Weight::from_parts(3_282, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(i.into())) } - /// Storage: Pov Map1M (r:1 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: MaxEncodedLen) - /// Storage: Pov UnboundedMap (r:1 w:0) - /// Proof Skipped: Pov UnboundedMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `Pov::Map1M` (r:1 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Pov::UnboundedMap` (r:1 w:0) + /// Proof: `Pov::UnboundedMap` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn storage_map_partial_unbounded_read(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `228 + i * (4 ±0)` // Estimated: `3692 + i * (4 ±0)` - // Minimum execution time: 3_150_000 picoseconds. - Weight::from_parts(3_582_963, 3692) - // Standard Error: 18 - .saturating_add(Weight::from_parts(380, 0).saturating_mul(i.into())) + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(7_507_333, 3692) + // Standard Error: 64 + .saturating_add(Weight::from_parts(982, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 4).saturating_mul(i.into())) } - /// Storage: Pov Map1M (r:1 w:0) - /// Proof: Pov Map1M (max_values: Some(1000000), max_size: Some(36), added: 2511, mode: MaxEncodedLen) - /// Storage: Pov UnboundedMap (r:1 w:0) - /// Proof Skipped: Pov UnboundedMap (max_values: None, max_size: None, mode: Ignored) + /// Storage: `Pov::Map1M` (r:1 w:0) + /// Proof: `Pov::Map1M` (`max_values`: Some(1000000), `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Pov::UnboundedMap` (r:1 w:0) + /// Proof: `Pov::UnboundedMap` (`max_values`: None, `max_size`: None, mode: `Ignored`) /// The range of component `i` is `[0, 1000]`. fn storage_map_partial_unbounded_ignored_read(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `228 + i * (4 ±0)` // Estimated: `3501 + i * (4 ±0)` - // Minimum execution time: 3_092_000 picoseconds. - Weight::from_parts(3_595_328, 3501) - // Standard Error: 20 - .saturating_add(Weight::from_parts(243, 0).saturating_mul(i.into())) + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(7_285_011, 3501) + // Standard Error: 80 + .saturating_add(Weight::from_parts(1_395, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 4).saturating_mul(i.into())) } @@ -825,24 +823,24 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_705_000 picoseconds. - Weight::from_parts(1_818_000, 0) + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(5_000_000, 0) } fn noop() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 533_000 picoseconds. - Weight::from_parts(587_000, 0) + // Minimum execution time: 2_000_000 picoseconds. 
+ Weight::from_parts(2_000_000, 0) } - /// Storage: Pov UnboundedMapTwox (r:65001 w:0) - /// Proof Skipped: Pov UnboundedMapTwox (max_values: None, max_size: None, mode: Measured) + /// Storage: `Pov::UnboundedMapTwox` (r:65001 w:0) + /// Proof: `Pov::UnboundedMapTwox` (`max_values`: None, `max_size`: None, mode: `Measured`) fn storage_iteration() -> Weight { // Proof Size summary in bytes: // Measured: `17985289` // Estimated: `178863754` - // Minimum execution time: 118_753_057_000 picoseconds. - Weight::from_parts(121_396_503_000, 178863754) + // Minimum execution time: 218_275_000_000 picoseconds. + Weight::from_parts(222_603_000_000, 178863754) .saturating_add(RocksDbWeight::get().reads(65001_u64)) } } diff --git a/substrate/frame/benchmarking/src/analysis.rs b/substrate/frame/benchmarking/src/analysis.rs index 5fc3abb5a27f..987078ff79a8 100644 --- a/substrate/frame/benchmarking/src/analysis.rs +++ b/substrate/frame/benchmarking/src/analysis.rs @@ -41,7 +41,7 @@ pub enum BenchmarkSelector { /// Multiplies the value by 1000 and converts it into an u128. fn mul_1000_into_u128(value: f64) -> u128 { - // This is slighly more precise than the alternative of `(value * 1000.0) as u128`. + // This is slightly more precise than the alternative of `(value * 1000.0) as u128`. (value as u128) .saturating_mul(1000) .saturating_add((value.fract() * 1000.0) as u128) diff --git a/substrate/frame/benchmarking/src/lib.rs b/substrate/frame/benchmarking/src/lib.rs index 4d44df794f02..dbf15df4f9f0 100644 --- a/substrate/frame/benchmarking/src/lib.rs +++ b/substrate/frame/benchmarking/src/lib.rs @@ -204,7 +204,7 @@ pub use v1::*; /// ## Where Clause /// /// Some pallets require a where clause specifying constraints on their generics to make -/// writing benchmarks feasible. To accomodate this situation, you can provide such a where +/// writing benchmarks feasible. To accommodate this situation, you can provide such a where /// clause as the (only) argument to the `#[benchmarks]` or `#[instance_benchmarks]` attribute /// macros. Below is an example of this taken from the `message-queue` pallet. /// diff --git a/substrate/frame/benchmarking/src/v1.rs b/substrate/frame/benchmarking/src/v1.rs index 4ce8cb815397..5baa1841dcaf 100644 --- a/substrate/frame/benchmarking/src/v1.rs +++ b/substrate/frame/benchmarking/src/v1.rs @@ -874,7 +874,7 @@ macro_rules! impl_bench_name_tests { $crate::BenchmarkError::Override(_) => { // This is still considered a success condition. $crate::__private::log::error!( - "WARNING: benchmark error overrided - {}", + "WARNING: benchmark error overridden - {}", stringify!($name), ); }, @@ -1185,7 +1185,7 @@ macro_rules! impl_benchmark { /// author chooses not to implement benchmarks. #[allow(unused)] fn test_bench_by_name(name: &[u8]) -> Result<(), $crate::BenchmarkError> { - let name = alloc::str::from_utf8(name) + let name = ::alloc::str::from_utf8(name) .map_err(|_| -> $crate::BenchmarkError { "`name` is not a valid utf8 string!".into() })?; match name { $( stringify!($name) => { @@ -1225,7 +1225,7 @@ macro_rules! impl_benchmark_test { >::components(&selected_benchmark); let execute_benchmark = | - c: alloc::vec::Vec<($crate::BenchmarkParameter, u32)> + c: ::alloc::vec::Vec<($crate::BenchmarkParameter, u32)> | -> Result<(), $crate::BenchmarkError> { // Always reset the state after the benchmark. $crate::__private::defer!($crate::benchmarking::wipe_db()); @@ -1266,7 +1266,7 @@ macro_rules! impl_benchmark_test { // and up to num_values-2 more equidistant values in between. 
// For 0..10 and num_values=6 this would mean: [0, 2, 4, 6, 8, 10] - let mut values = alloc::vec![low]; + let mut values = ::alloc::vec![low]; let diff = (high - low).min(num_values - 1); let slope = (high - low) as f32 / diff as f32; @@ -1278,7 +1278,7 @@ macro_rules! impl_benchmark_test { for component_value in values { // Select the max value for all the other components. - let c: alloc::vec::Vec<($crate::BenchmarkParameter, u32)> = components + let c: ::alloc::vec::Vec<($crate::BenchmarkParameter, u32)> = components .iter() .map(|(n, _, h)| if *n == name { @@ -1684,7 +1684,7 @@ macro_rules! impl_test_function { Err(err) => { println!( "{}: {:?}", - alloc::str::from_utf8(benchmark_name) + ::alloc::str::from_utf8(benchmark_name) .expect("benchmark name is always a valid string!"), err, ); @@ -1695,7 +1695,7 @@ macro_rules! impl_test_function { $crate::BenchmarkError::Stop(err) => { println!( "{}: {:?}", - alloc::str::from_utf8(benchmark_name) + ::alloc::str::from_utf8(benchmark_name) .expect("benchmark name is always a valid string!"), err, ); @@ -1704,8 +1704,8 @@ macro_rules! impl_test_function { $crate::BenchmarkError::Override(_) => { // This is still considered a success condition. $crate::__private::log::error!( - "WARNING: benchmark error overrided - {}", - alloc::str::from_utf8(benchmark_name) + "WARNING: benchmark error overridden - {}", + ::alloc::str::from_utf8(benchmark_name) .expect("benchmark name is always a valid string!"), ); }, @@ -1713,7 +1713,7 @@ macro_rules! impl_test_function { // This is considered a success condition. $crate::__private::log::error!( "WARNING: benchmark error skipped - {}", - alloc::str::from_utf8(benchmark_name) + ::alloc::str::from_utf8(benchmark_name) .expect("benchmark name is always a valid string!"), ); } @@ -1721,7 +1721,7 @@ macro_rules! impl_test_function { // This is considered a success condition. $crate::__private::log::error!( "WARNING: benchmark weightless skipped - {}", - alloc::str::from_utf8(benchmark_name) + ::alloc::str::from_utf8(benchmark_name) .expect("benchmark name is always a valid string!"), ); } @@ -1851,8 +1851,8 @@ macro_rules! add_benchmark { Err($crate::BenchmarkError::Override(mut result)) => { // Insert override warning as the first storage key. 
$crate::__private::log::error!(
- "WARNING: benchmark error overrided - {}",
- alloc::str::from_utf8(benchmark)
+ "WARNING: benchmark error overridden - {}",
+ ::alloc::str::from_utf8(benchmark)
.expect("benchmark name is always a valid string!")
);
result.keys.insert(0, (b"Benchmark Override".to_vec(), 0, 0, false));
diff --git a/substrate/frame/bounties/Cargo.toml b/substrate/frame/bounties/Cargo.toml
index a5e3d863f27e..597a9c66c75b 100644
--- a/substrate/frame/bounties/Cargo.toml
+++ b/substrate/frame/bounties/Cargo.toml
@@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
"derive",
] }
log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
frame-support = { path = "../support", default-features = false }
frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml
index 5ca08d00e33c..9c5af0ee1f2a 100644
--- a/substrate/frame/broker/Cargo.toml
+++ b/substrate/frame/broker/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
bitvec = { version = "1.0.0", default-features = false }
sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false }
sp-core = { path = "../../primitives/core", default-features = false }
diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs
index badca3de4d9f..4331150801ca 100644
--- a/substrate/frame/broker/src/benchmarking.rs
+++ b/substrate/frame/broker/src/benchmarking.rs
@@ -918,6 +918,23 @@ mod benches {
Ok(())
}
+ #[benchmark]
+ fn swap_leases() -> Result<(), BenchmarkError> {
+ let admin_origin =
+ T::AdminOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
+
+ // Add two sets of leases to `Leases`, for tasks 1 and 2.
+ let n = (T::MaxLeasedCores::get() / 2) as usize;
+ let mut leases = vec![LeaseRecordItem { task: 1, until: 10u32.into() }; n];
+ leases.extend(vec![LeaseRecordItem { task: 2, until: 20u32.into() }; n]);
+ Leases::<T>::put(BoundedVec::try_from(leases).unwrap());
+
+ #[extrinsic_call]
+ _(admin_origin as T::RuntimeOrigin, 1, 2);
+
+ Ok(())
+ }
+
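The `swap_leases` call exercised by this benchmark relabels, in a single pass over `Leases`, every lease belonging to either task with the other task's id; the `until` timeslices are untouched. A minimal standalone sketch of that semantics, using plain `u32` ids and a local stand-in for `LeaseRecordItem` rather than the pallet types:

```rust
// Standalone sketch, not pallet code: swap the task ids on every lease in one pass.
struct LeaseRecordItem {
    task: u32,
    until: u32,
}

fn swap_leases(leases: &mut [LeaseRecordItem], id: u32, other: u32) {
    for lease in leases.iter_mut() {
        if lease.task == id {
            lease.task = other;
        } else if lease.task == other {
            lease.task = id;
        }
    }
}

fn main() {
    // Mirrors the benchmark setup above: two sets of leases, for tasks 1 and 2.
    let mut leases = vec![
        LeaseRecordItem { task: 1, until: 10 },
        LeaseRecordItem { task: 1, until: 10 },
        LeaseRecordItem { task: 2, until: 20 },
        LeaseRecordItem { task: 2, until: 20 },
    ];
    swap_leases(&mut leases, 1, 2);
    // All task-1 leases now carry task 2 and vice versa; `until` is unchanged.
    assert!(leases[..2].iter().all(|l| l.task == 2 && l.until == 10));
    assert!(leases[2..].iter().all(|l| l.task == 1 && l.until == 20));
}
```

A single unconditional pass keeps the cost independent of how the leases are split between the two tasks, which is why the generated `swap_leases()` weight below is a flat constant.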
// Implements a test for each benchmark. Execute with:
// `cargo test -p pallet-broker --features runtime-benchmarks`.
impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs
index f2451013251f..74cda9c4f4cb 100644
--- a/substrate/frame/broker/src/dispatchable_impls.rs
+++ b/substrate/frame/broker/src/dispatchable_impls.rs
@@ -437,4 +437,18 @@ impl<T: Config> Pallet<T> {
Self::deposit_event(Event::AllowedRenewalDropped { core, when });
Ok(())
}
+
+ pub(crate) fn do_swap_leases(id: TaskId, other: TaskId) -> DispatchResult {
+ Leases::<T>::mutate(|leases| {
+ leases.iter_mut().for_each(|lease| {
+ if lease.task == id {
+ lease.task = other;
+ } else if lease.task == other {
+ lease.task = id;
+ }
+ })
+ });
+
+ Ok(())
+ }
}
diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs
index f8af41b2e356..beed900d3421 100644
--- a/substrate/frame/broker/src/lib.rs
+++ b/substrate/frame/broker/src/lib.rs
@@ -685,7 +685,7 @@ pub mod pallet {
/// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
/// - `region_id`: The Region which was assigned to the Pool.
/// - `max_timeslices`: The maximum number of timeslices which should be processed. This may
- /// effect the weight of the call but should be ideally made equivalant to the length of
+ /// affect the weight of the call but should ideally be made equivalent to the length of
/// the Region `region_id`. If it is less than this, then further dispatches will be
/// required with the `region_id` which makes up any remainders of the region to be
/// collected.
@@ -788,5 +788,13 @@ pub mod pallet {
Self::do_notify_core_count(core_count)?;
Ok(())
}
+
+ #[pallet::call_index(99)]
+ #[pallet::weight(T::WeightInfo::swap_leases())]
+ pub fn swap_leases(origin: OriginFor<T>, id: TaskId, other: TaskId) -> DispatchResult {
+ T::AdminOrigin::ensure_origin_or_root(origin)?;
+ Self::do_swap_leases(id, other)?;
+ Ok(())
+ }
}
}
diff --git a/substrate/frame/broker/src/types.rs b/substrate/frame/broker/src/types.rs
index 7e9f351723a5..e8119d29ef5d 100644
--- a/substrate/frame/broker/src/types.rs
+++ b/substrate/frame/broker/src/types.rs
@@ -55,7 +55,7 @@ pub enum Finality {
pub struct RegionId {
/// The timeslice at which this Region begins.
pub begin: Timeslice,
- /// The index of the Polakdot Core on which this Region will be scheduled.
+ /// The index of the Polkadot Core on which this Region will be scheduled.
pub core: CoreIndex,
/// The regularity parts in which this Region will be scheduled.
pub mask: CoreMask,
@@ -198,7 +198,7 @@ pub struct PoolIoRecord {
/// The total change of the portion of the pool supplied by purchased Bulk Coretime, measured
/// in Core Mask Bits.
pub private: SignedCoreMaskBitCount,
- /// The total change of the portion of the pool supplied by the Polkaot System, measured in
+ /// The total change of the portion of the pool supplied by the Polkadot System, measured in
/// Core Mask Bits.
pub system: SignedCoreMaskBitCount,
}
diff --git a/substrate/frame/broker/src/weights.rs b/substrate/frame/broker/src/weights.rs
index a8f50eeee6e6..a8b9fb598b8b 100644
--- a/substrate/frame/broker/src/weights.rs
+++ b/substrate/frame/broker/src/weights.rs
@@ -17,10 +17,10 @@
//! Autogenerated weights for `pallet_broker`
//!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! 
DATE: 2023-09-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-pzhd7p6z-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -76,6 +76,7 @@ pub trait WeightInfo { fn request_revenue_info_at() -> Weight; fn notify_core_count() -> Weight; fn do_tick_base() -> Weight; + fn swap_leases() -> Weight; } /// Weights for `pallet_broker` using the Substrate node and recommended hardware. @@ -87,8 +88,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_040_000 picoseconds. - Weight::from_parts(3_344_000, 0) + // Minimum execution time: 2_865_000 picoseconds. + Weight::from_parts(3_061_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -97,8 +98,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 21_259_000 picoseconds. - Weight::from_parts(22_110_000, 7496) + // Minimum execution time: 18_431_000 picoseconds. + Weight::from_parts(19_558_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -108,8 +109,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 20_330_000 picoseconds. - Weight::from_parts(20_826_000, 7496) + // Minimum execution time: 17_724_000 picoseconds. + Weight::from_parts(18_688_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -119,8 +120,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 13_411_000 picoseconds. - Weight::from_parts(13_960_000, 1526) + // Minimum execution time: 10_513_000 picoseconds. + Weight::from_parts(11_138_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -139,14 +140,12 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(n: u32, ) -> Weight { + fn start_sales(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 57_770_000 picoseconds. - Weight::from_parts(61_047_512, 8499) - // Standard Error: 165 - .saturating_add(Weight::from_parts(3, 0).saturating_mul(n.into())) + // Minimum execution time: 50_864_000 picoseconds. 
+ Weight::from_parts(54_000_280, 8499) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(16_u64)) } @@ -162,10 +161,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `568` - // Estimated: `2053` - // Minimum execution time: 51_196_000 picoseconds. - Weight::from_parts(52_382_000, 2053) + // Measured: `635` + // Estimated: `2120` + // Minimum execution time: 43_630_000 picoseconds. + Weight::from_parts(44_622_000, 2120) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -185,10 +184,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `686` + // Measured: `753` // Estimated: `4698` - // Minimum execution time: 71_636_000 picoseconds. - Weight::from_parts(73_679_000, 4698) + // Minimum execution time: 62_453_000 picoseconds. + Weight::from_parts(63_882_000, 4698) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -198,8 +197,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 19_182_000 picoseconds. - Weight::from_parts(19_775_000, 3550) + // Minimum execution time: 17_237_000 picoseconds. + Weight::from_parts(17_757_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -209,21 +208,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 20_688_000 picoseconds. - Weight::from_parts(21_557_000, 3550) + // Minimum execution time: 18_504_000 picoseconds. + Weight::from_parts(19_273_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: `Broker::Regions` (r:1 w:2) + /// Storage: `Broker::Regions` (r:1 w:3) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 21_190_000 picoseconds. - Weight::from_parts(22_215_000, 3550) + // Minimum execution time: 20_477_000 picoseconds. + Weight::from_parts(21_328_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) @@ -237,8 +236,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `740` // Estimated: `4681` - // Minimum execution time: 34_591_000 picoseconds. - Weight::from_parts(36_227_000, 4681) + // Minimum execution time: 31_815_000 picoseconds. + Weight::from_parts(32_700_000, 4681) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -256,8 +255,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `775` // Estimated: `5996` - // Minimum execution time: 40_346_000 picoseconds. 
- Weight::from_parts(41_951_000, 5996) + // Minimum execution time: 38_313_000 picoseconds. + Weight::from_parts(38_985_000, 5996) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -272,10 +271,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 75_734_000 picoseconds. - Weight::from_parts(78_168_395, 6196) - // Standard Error: 63_180 - .saturating_add(Weight::from_parts(1_076_259, 0).saturating_mul(m.into())) + // Minimum execution time: 70_170_000 picoseconds. + Weight::from_parts(71_245_388, 6196) + // Standard Error: 54_382 + .saturating_add(Weight::from_parts(1_488_794, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -287,8 +286,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 46_383_000 picoseconds. - Weight::from_parts(47_405_000, 3593) + // Minimum execution time: 43_414_000 picoseconds. + Weight::from_parts(44_475_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -300,8 +299,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `603` // Estimated: `3550` - // Minimum execution time: 30_994_000 picoseconds. - Weight::from_parts(31_979_000, 3550) + // Minimum execution time: 31_327_000 picoseconds. + Weight::from_parts(32_050_000, 3550) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -315,8 +314,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 37_584_000 picoseconds. - Weight::from_parts(44_010_000, 3533) + // Minimum execution time: 41_315_000 picoseconds. + Weight::from_parts(42_421_000, 3533) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -330,10 +329,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `830` + // Measured: `995` // Estimated: `3593` - // Minimum execution time: 45_266_000 picoseconds. - Weight::from_parts(48_000_000, 3593) + // Minimum execution time: 49_707_000 picoseconds. + Weight::from_parts(51_516_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -343,10 +342,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: - // Measured: `525` + // Measured: `661` // Estimated: `4698` - // Minimum execution time: 25_365_000 picoseconds. - Weight::from_parts(26_920_000, 4698) + // Minimum execution time: 26_207_000 picoseconds. + Weight::from_parts(27_227_000, 4698) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -355,22 +354,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_519_000 picoseconds. 
- Weight::from_parts(7_098_698, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(8, 0).saturating_mul(n.into())) + // Minimum execution time: 4_670_000 picoseconds. + Weight::from_parts(5_170_450, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(37, 0).saturating_mul(n.into())) } - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn process_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `98` - // Estimated: `3563` - // Minimum execution time: 7_608_000 picoseconds. - Weight::from_parts(8_157_815, 3563) - // Standard Error: 26 - .saturating_add(Weight::from_parts(48, 0).saturating_mul(n.into())) + // Measured: `404` + // Estimated: `1487` + // Minimum execution time: 6_916_000 picoseconds. + Weight::from_parts(7_485_053, 1487) + // Standard Error: 23 + .saturating_add(Weight::from_parts(30, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -386,10 +385,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `905` - // Estimated: `4370` - // Minimum execution time: 59_993_000 picoseconds. - Weight::from_parts(61_752_000, 4370) + // Measured: `972` + // Estimated: `4437` + // Minimum execution time: 50_987_000 picoseconds. + Weight::from_parts(52_303_000, 4437) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -408,10 +407,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 41_863_000 picoseconds. - Weight::from_parts(44_033_031, 8499) - // Standard Error: 116 - .saturating_add(Weight::from_parts(764, 0).saturating_mul(n.into())) + // Minimum execution time: 38_334_000 picoseconds. + Weight::from_parts(40_517_609, 8499) + // Standard Error: 90 + .saturating_add(Weight::from_parts(338, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)) } @@ -423,8 +422,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 9_588_000 picoseconds. - Weight::from_parts(9_925_000, 3493) + // Minimum execution time: 7_850_000 picoseconds. + Weight::from_parts(8_157_000, 3493) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -436,8 +435,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 19_308_000 picoseconds. - Weight::from_parts(20_482_000, 4681) + // Minimum execution time: 17_313_000 picoseconds. + Weight::from_parts(17_727_000, 4681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -445,28 +444,46 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 147_000 picoseconds. 
- Weight::from_parts(184_000, 0) + // Minimum execution time: 171_000 picoseconds. + Weight::from_parts(196_000, 0) } + /// Storage: `Broker::CoreCountInbox` (r:0 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) fn notify_core_count() -> Weight { - T::DbWeight::get().reads_writes(1, 1) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_413_000 picoseconds. + Weight::from_parts(2_587_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:0) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `699` - // Estimated: `4164` - // Minimum execution time: 19_824_000 picoseconds. - Weight::from_parts(20_983_000, 4164) + // Measured: `603` + // Estimated: `4068` + // Minimum execution time: 13_121_000 picoseconds. + Weight::from_parts(13_685_000, 4068) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + fn swap_leases() -> Weight { + // Proof Size summary in bytes: + // Measured: `239` + // Estimated: `1526` + // Minimum execution time: 6_847_000 picoseconds. + Weight::from_parts(7_185_000, 1526) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -478,8 +495,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_040_000 picoseconds. - Weight::from_parts(3_344_000, 0) + // Minimum execution time: 2_865_000 picoseconds. + Weight::from_parts(3_061_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -488,8 +505,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 21_259_000 picoseconds. - Weight::from_parts(22_110_000, 7496) + // Minimum execution time: 18_431_000 picoseconds. + Weight::from_parts(19_558_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -499,8 +516,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 20_330_000 picoseconds. - Weight::from_parts(20_826_000, 7496) + // Minimum execution time: 17_724_000 picoseconds. 
+ Weight::from_parts(18_688_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -510,8 +527,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 13_411_000 picoseconds. - Weight::from_parts(13_960_000, 1526) + // Minimum execution time: 10_513_000 picoseconds. + Weight::from_parts(11_138_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -530,14 +547,12 @@ impl WeightInfo for () { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(n: u32, ) -> Weight { + fn start_sales(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 57_770_000 picoseconds. - Weight::from_parts(61_047_512, 8499) - // Standard Error: 165 - .saturating_add(Weight::from_parts(3, 0).saturating_mul(n.into())) + // Minimum execution time: 50_864_000 picoseconds. + Weight::from_parts(54_000_280, 8499) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(16_u64)) } @@ -553,10 +568,10 @@ impl WeightInfo for () { /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `568` - // Estimated: `2053` - // Minimum execution time: 51_196_000 picoseconds. - Weight::from_parts(52_382_000, 2053) + // Measured: `635` + // Estimated: `2120` + // Minimum execution time: 43_630_000 picoseconds. + Weight::from_parts(44_622_000, 2120) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -576,10 +591,10 @@ impl WeightInfo for () { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `686` + // Measured: `753` // Estimated: `4698` - // Minimum execution time: 71_636_000 picoseconds. - Weight::from_parts(73_679_000, 4698) + // Minimum execution time: 62_453_000 picoseconds. + Weight::from_parts(63_882_000, 4698) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -589,8 +604,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 19_182_000 picoseconds. - Weight::from_parts(19_775_000, 3550) + // Minimum execution time: 17_237_000 picoseconds. + Weight::from_parts(17_757_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -600,21 +615,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 20_688_000 picoseconds. - Weight::from_parts(21_557_000, 3550) + // Minimum execution time: 18_504_000 picoseconds. 
+ Weight::from_parts(19_273_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: `Broker::Regions` (r:1 w:2) + /// Storage: `Broker::Regions` (r:1 w:3) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 21_190_000 picoseconds. - Weight::from_parts(22_215_000, 3550) + // Minimum execution time: 20_477_000 picoseconds. + Weight::from_parts(21_328_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) @@ -628,8 +643,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `740` // Estimated: `4681` - // Minimum execution time: 34_591_000 picoseconds. - Weight::from_parts(36_227_000, 4681) + // Minimum execution time: 31_815_000 picoseconds. + Weight::from_parts(32_700_000, 4681) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -647,8 +662,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `775` // Estimated: `5996` - // Minimum execution time: 40_346_000 picoseconds. - Weight::from_parts(41_951_000, 5996) + // Minimum execution time: 38_313_000 picoseconds. + Weight::from_parts(38_985_000, 5996) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -663,10 +678,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 75_734_000 picoseconds. - Weight::from_parts(78_168_395, 6196) - // Standard Error: 63_180 - .saturating_add(Weight::from_parts(1_076_259, 0).saturating_mul(m.into())) + // Minimum execution time: 70_170_000 picoseconds. + Weight::from_parts(71_245_388, 6196) + // Standard Error: 54_382 + .saturating_add(Weight::from_parts(1_488_794, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -678,8 +693,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 46_383_000 picoseconds. - Weight::from_parts(47_405_000, 3593) + // Minimum execution time: 43_414_000 picoseconds. + Weight::from_parts(44_475_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -691,8 +706,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `603` // Estimated: `3550` - // Minimum execution time: 30_994_000 picoseconds. - Weight::from_parts(31_979_000, 3550) + // Minimum execution time: 31_327_000 picoseconds. + Weight::from_parts(32_050_000, 3550) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -706,8 +721,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 37_584_000 picoseconds. - Weight::from_parts(44_010_000, 3533) + // Minimum execution time: 41_315_000 picoseconds. 
+ Weight::from_parts(42_421_000, 3533) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -721,10 +736,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `830` + // Measured: `995` // Estimated: `3593` - // Minimum execution time: 45_266_000 picoseconds. - Weight::from_parts(48_000_000, 3593) + // Minimum execution time: 49_707_000 picoseconds. + Weight::from_parts(51_516_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -734,10 +749,10 @@ impl WeightInfo for () { /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: - // Measured: `525` + // Measured: `661` // Estimated: `4698` - // Minimum execution time: 25_365_000 picoseconds. - Weight::from_parts(26_920_000, 4698) + // Minimum execution time: 26_207_000 picoseconds. + Weight::from_parts(27_227_000, 4698) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -746,22 +761,22 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_519_000 picoseconds. - Weight::from_parts(7_098_698, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(8, 0).saturating_mul(n.into())) + // Minimum execution time: 4_670_000 picoseconds. + Weight::from_parts(5_170_450, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(37, 0).saturating_mul(n.into())) } - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn process_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `98` - // Estimated: `3563` - // Minimum execution time: 7_608_000 picoseconds. - Weight::from_parts(8_157_815, 3563) - // Standard Error: 26 - .saturating_add(Weight::from_parts(48, 0).saturating_mul(n.into())) + // Measured: `404` + // Estimated: `1487` + // Minimum execution time: 6_916_000 picoseconds. + Weight::from_parts(7_485_053, 1487) + // Standard Error: 23 + .saturating_add(Weight::from_parts(30, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -777,10 +792,10 @@ impl WeightInfo for () { /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `905` - // Estimated: `4370` - // Minimum execution time: 59_993_000 picoseconds. - Weight::from_parts(61_752_000, 4370) + // Measured: `972` + // Estimated: `4437` + // Minimum execution time: 50_987_000 picoseconds. + Weight::from_parts(52_303_000, 4437) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -799,10 +814,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 41_863_000 picoseconds. 
- Weight::from_parts(44_033_031, 8499) - // Standard Error: 116 - .saturating_add(Weight::from_parts(764, 0).saturating_mul(n.into())) + // Minimum execution time: 38_334_000 picoseconds. + Weight::from_parts(40_517_609, 8499) + // Standard Error: 90 + .saturating_add(Weight::from_parts(338, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(15_u64)) } @@ -814,8 +829,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 9_588_000 picoseconds. - Weight::from_parts(9_925_000, 3493) + // Minimum execution time: 7_850_000 picoseconds. + Weight::from_parts(8_157_000, 3493) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -827,8 +842,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 19_308_000 picoseconds. - Weight::from_parts(20_482_000, 4681) + // Minimum execution time: 17_313_000 picoseconds. + Weight::from_parts(17_727_000, 4681) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -836,28 +851,45 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 147_000 picoseconds. - Weight::from_parts(184_000, 0) + // Minimum execution time: 171_000 picoseconds. + Weight::from_parts(196_000, 0) } + /// Storage: `Broker::CoreCountInbox` (r:0 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) fn notify_core_count() -> Weight { - RocksDbWeight::get().reads(1) - .saturating_add(RocksDbWeight::get().writes(1)) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_413_000 picoseconds. + Weight::from_parts(2_587_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:0) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `699` - // Estimated: `4164` - // Minimum execution time: 19_824_000 picoseconds. - Weight::from_parts(20_983_000, 4164) + // Measured: `603` + // Estimated: `4068` + // Minimum execution time: 13_121_000 picoseconds. 
+		Weight::from_parts(13_685_000, 4068)
 			.saturating_add(RocksDbWeight::get().reads(4_u64))
-			.saturating_add(RocksDbWeight::get().writes(3_u64))
+			.saturating_add(RocksDbWeight::get().writes(2_u64))
+	}
+	/// Storage: `Broker::Leases` (r:1 w:1)
+	/// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`)
+	fn swap_leases() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured:  `239`
+		// Estimated: `1526`
+		// Minimum execution time: 6_847_000 picoseconds.
+		Weight::from_parts(7_185_000, 1526)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 }
diff --git a/substrate/frame/child-bounties/Cargo.toml b/substrate/frame/child-bounties/Cargo.toml
index d472c050bd44..8a3ce913615c 100644
--- a/substrate/frame/child-bounties/Cargo.toml
+++ b/substrate/frame/child-bounties/Cargo.toml
@@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
 	"derive",
 ] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/collective/Cargo.toml b/substrate/frame/collective/Cargo.toml
index 6cdc597184f0..ef65a91404b9 100644
--- a/substrate/frame/collective/Cargo.toml
+++ b/substrate/frame/collective/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/collective/README.md b/substrate/frame/collective/README.md
index 444927e51da2..e860edbd484b 100644
--- a/substrate/frame/collective/README.md
+++ b/substrate/frame/collective/README.md
@@ -9,7 +9,7 @@ calculations, but enforces this neither in `set_members` nor in `change_members_
 A "prime" member may be set to help determine the default vote behavior based on chain
 config. If `PrimeDefaultVote` is used, the prime vote acts as the default vote in case of any
 abstentions after the voting period. If `MoreThanMajorityThenPrimeDefaultVote` is used, then
-abstentations will first follow the majority of the collective voting, and then the prime
+abstentions will first follow the majority of the collective voting, and then the prime
 member.

 Voting happens through motions comprising a proposal (i.e. a dispatchable) plus a
diff --git a/substrate/frame/collective/src/lib.rs b/substrate/frame/collective/src/lib.rs
index 9e261f36cf88..3544a8cddb45 100644
--- a/substrate/frame/collective/src/lib.rs
+++ b/substrate/frame/collective/src/lib.rs
@@ -286,7 +286,7 @@ pub mod pallet {
 	pub type Members<T: Config<I>, I: 'static = ()> = StorageValue<_, Vec<T::AccountId>, ValueQuery>;

-	/// The prime member that helps determine the default vote behavior in case of absentations.
+	/// The prime member that helps determine the default vote behavior in case of abstentions.
 	#[pallet::storage]
 	pub type Prime<T: Config<I>, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>;
diff --git a/substrate/frame/collective/src/tests.rs b/substrate/frame/collective/src/tests.rs
index 8a80dd167e3d..5240dc215ff7 100644
--- a/substrate/frame/collective/src/tests.rs
+++ b/substrate/frame/collective/src/tests.rs
@@ -995,7 +995,7 @@ fn motions_all_first_vote_free_works() {
 		Collective::vote(RuntimeOrigin::signed(3), hash, 0, false);
 	assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes);

-	// Test close() Extrincis | Check DispatchResultWithPostInfo with Pay Info
+	// Test close() Extrinsics | Check DispatchResultWithPostInfo with Pay Info

 	let proposal_weight = proposal.get_dispatch_info().weight;
 	let close_rval: DispatchResultWithPostInfo =
diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml
index beac09acd33a..e542c73186de 100644
--- a/substrate/frame/contracts/Cargo.toml
+++ b/substrate/frame/contracts/Cargo.toml
@@ -18,12 +18,13 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
+paste = { version = "1.0", default-features = false }
 bitflags = "1.3"
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
 	"derive",
 	"max-encoded-len",
 ] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
 serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
 smallvec = { version = "1", default-features = false, features = [
@@ -56,7 +57,7 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/x
 [dev-dependencies]
 array-bytes = "6.1"
 assert_matches = "1"
-env_logger = "0.9"
+env_logger = "0.11"
 pretty_assertions = "1"
 wat = "1"
 pallet-contracts-fixtures = { path = "./fixtures" }
diff --git a/substrate/frame/contracts/README.md b/substrate/frame/contracts/README.md
index 6e817292e66c..09dc770300ca 100644
--- a/substrate/frame/contracts/README.md
+++ b/substrate/frame/contracts/README.md
@@ -59,7 +59,7 @@ In general, a contract execution needs to be deterministic so that all nodes com
 it. To that end we disallow any instructions that could cause indeterminism. Most notable are any floating point
 arithmetic. That said, sometimes contracts are executed off-chain and hence are not subject to consensus. If code is
 only executed by a single node and implicitly trusted by other actors is such a case. Trusted execution environments
-come to mind. To that end we allow the execution of indeterminstic code for off-chain usages with the following
+come to mind. To that end we allow the execution of indeterministic code for off-chain usages with the following
 constraints:

 1. No contract can ever be instantiated from an indeterministic code.
The only way to execute the code is to use a delegate call from a deterministic contract.
diff --git a/substrate/frame/contracts/fixtures/Cargo.toml b/substrate/frame/contracts/fixtures/Cargo.toml
index 5ac140cd91b1..8c93c6f16f66 100644
--- a/substrate/frame/contracts/fixtures/Cargo.toml
+++ b/substrate/frame/contracts/fixtures/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 [dependencies]
 frame-system = { path = "../../system" }
 sp-runtime = { path = "../../../primitives/runtime" }
-anyhow = "1.0.0"
+anyhow = "1.0.81"

 [build-dependencies]
 parity-wasm = "0.45.0"
@@ -21,7 +21,7 @@ tempfile = "3.8.1"
 toml = "0.8.2"
 twox-hash = "1.6.3"
 polkavm-linker = { workspace = true, optional = true }
-anyhow = "1.0.0"
+anyhow = "1.0.81"

 [features]
 riscv = ["polkavm-linker"]
diff --git a/substrate/frame/contracts/fixtures/contracts/multi_store.rs b/substrate/frame/contracts/fixtures/contracts/multi_store.rs
index b83f3995a42b..a78115f01488 100644
--- a/substrate/frame/contracts/fixtures/contracts/multi_store.rs
+++ b/substrate/frame/contracts/fixtures/contracts/multi_store.rs
@@ -15,7 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-//! Does two stores to two seperate storage items
+//! Does two stores to two separate storage items

 #![no_std]
 #![no_main]
diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml
index 38f54cd40942..1f255d1dbae5 100644
--- a/substrate/frame/contracts/mock-network/Cargo.toml
+++ b/substrate/frame/contracts/mock-network/Cargo.toml
@@ -30,7 +30,7 @@ pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features =
 polkadot-parachain-primitives = { path = "../../../../polkadot/parachain" }
 polkadot-primitives = { path = "../../../../polkadot/primitives" }
 polkadot-runtime-parachains = { path = "../../../../polkadot/runtime/parachains" }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 sp-api = { path = "../../../primitives/api", default-features = false }
 sp-core = { path = "../../../primitives/core", default-features = false }
 sp-io = { path = "../../../primitives/io", default-features = false }
diff --git a/substrate/frame/contracts/mock-network/src/parachain.rs b/substrate/frame/contracts/mock-network/src/parachain.rs
index 4c9ce7354af6..76797b9926c1 100644
--- a/substrate/frame/contracts/mock-network/src/parachain.rs
+++ b/substrate/frame/contracts/mock-network/src/parachain.rs
@@ -281,6 +281,9 @@ impl Config for XcmConfig {
 	type SafeCallFilter = Everything;
 	type Aliasers = Nothing;
 	type TransactionalProcessor = FrameTransactionalProcessor;
+	type HrmpNewChannelOpenRequestHandler = ();
+	type HrmpChannelAcceptedHandler = ();
+	type HrmpChannelClosingHandler = ();
 }

 impl mock_msg_queue::Config for Runtime {
diff --git a/substrate/frame/contracts/mock-network/src/relay_chain.rs b/substrate/frame/contracts/mock-network/src/relay_chain.rs
index 8c79255728eb..470304ed357e 100644
--- a/substrate/frame/contracts/mock-network/src/relay_chain.rs
+++ b/substrate/frame/contracts/mock-network/src/relay_chain.rs
@@ -182,6 +182,9 @@ impl Config for XcmConfig {
 	type SafeCallFilter = Everything;
 	type Aliasers = Nothing;
 	type TransactionalProcessor = FrameTransactionalProcessor;
+	type HrmpNewChannelOpenRequestHandler = ();
+	type HrmpChannelAcceptedHandler = ();
+	type HrmpChannelClosingHandler = ();
 }

 pub type LocalOriginToLocation = SignedToAccountId32<RuntimeOrigin, AccountId, RelayNetwork>;
@@ -222,6 +225,7 @@ impl pallet_message_queue::Config for Runtime {
 	type HeapSize = MessageQueueHeapSize;
 	type MaxStale = MessageQueueMaxStale;
 	type ServiceWeight = MessageQueueServiceWeight;
+	type IdleMaxServiceWeight = ();
 	type MessageProcessor = MessageProcessor;
 	type QueueChangeHandler = ();
 	type WeightInfo = ();
diff --git a/substrate/frame/contracts/mock-network/src/tests.rs b/substrate/frame/contracts/mock-network/src/tests.rs
index d22221fe8ee0..39aa9bebc0f5 100644
--- a/substrate/frame/contracts/mock-network/src/tests.rs
+++ b/substrate/frame/contracts/mock-network/src/tests.rs
@@ -23,7 +23,6 @@ use crate::{
 };
 use codec::{Decode, Encode};
 use frame_support::{
-	assert_err,
 	pallet_prelude::Weight,
 	traits::{fungibles::Mutate, Currency},
 };
@@ -102,7 +101,7 @@ fn test_xcm_execute() {
 			0,
 			Weight::MAX,
 			None,
-			VersionedXcm::V4(message).encode(),
+			VersionedXcm::V4(message).encode().encode(),
 			DebugInfo::UnsafeDebug,
 			CollectEvents::UnsafeCollect,
 			Determinism::Enforced,
@@ -146,7 +145,7 @@ fn test_xcm_execute_incomplete() {
 			0,
 			Weight::MAX,
 			None,
-			VersionedXcm::V4(message).encode(),
+			VersionedXcm::V4(message).encode().encode(),
 			DebugInfo::UnsafeDebug,
 			CollectEvents::UnsafeCollect,
 			Determinism::Enforced,
@@ -160,37 +159,6 @@
 	});
 }

-#[test]
-fn test_xcm_execute_filtered_call() {
-	MockNet::reset();
-
-	let contract_addr = instantiate_test_contract("xcm_execute");
-
-	ParaA::execute_with(|| {
-		// `remark` should be rejected, as it is not allowed by our CallFilter.
-		let call = parachain::RuntimeCall::System(frame_system::Call::remark { remark: vec![] });
-		let message: Xcm<parachain::RuntimeCall> = Xcm(vec![Transact {
-			origin_kind: OriginKind::Native,
-			require_weight_at_most: Weight::MAX,
-			call: call.encode().into(),
-		}]);
-
-		let result = ParachainContracts::bare_call(
-			ALICE,
-			contract_addr.clone(),
-			0,
-			Weight::MAX,
-			None,
-			VersionedXcm::V4(message).encode(),
-			DebugInfo::UnsafeDebug,
-			CollectEvents::UnsafeCollect,
-			Determinism::Enforced,
-		);
-
-		assert_err!(result.result, frame_system::Error::<parachain::Runtime>::CallFiltered);
-	});
-}
-
 #[test]
 fn test_xcm_execute_reentrant_call() {
 	MockNet::reset();
@@ -222,7 +190,7 @@
 			0,
 			Weight::MAX,
 			None,
-			VersionedXcm::V4(message).encode(),
+			VersionedXcm::V4(message).encode().encode(),
 			DebugInfo::UnsafeDebug,
 			CollectEvents::UnsafeCollect,
 			Determinism::Enforced,
@@ -258,7 +226,7 @@ fn test_xcm_send() {
 			0,
 			Weight::MAX,
 			None,
-			(dest, message).encode(),
+			(dest, message.encode()).encode(),
 			DebugInfo::UnsafeDebug,
 			CollectEvents::UnsafeCollect,
 			Determinism::Enforced,
diff --git a/substrate/frame/contracts/src/benchmarking/code.rs b/substrate/frame/contracts/src/benchmarking/code.rs
index 12402f22e9e9..53da5433fa54 100644
--- a/substrate/frame/contracts/src/benchmarking/code.rs
+++ b/substrate/frame/contracts/src/benchmarking/code.rs
@@ -165,7 +165,7 @@ impl<T: Config> From<ModuleDefinition> for WasmModule<T> {
 		// Grant access to linear memory.
 		// Every contract module is required to have an imported memory.
-		// If no memory is specified in the passed ModuleDefenition, then
+		// If no memory is specified in the passed ModuleDefinition, then
 		// default to (1, 1).
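 		// Illustrative aside, not part of the upstream change: with the standard
 		// 64 KiB wasm page size, the `(1, 1)` default corresponds to an imported
 		// memory of exactly one page that can never grow, i.e. in wat syntax:
 		//
 		//     (import "env" "memory" (memory 1 1))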
 		let (init, max) = if let Some(memory) = &def.memory {
 			(memory.min_pages, Some(memory.max_pages))
diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs
index e7831957511a..9b86d6475223 100644
--- a/substrate/frame/contracts/src/benchmarking/mod.rs
+++ b/substrate/frame/contracts/src/benchmarking/mod.rs
@@ -16,7 +16,6 @@
 // limitations under the License.

 //! Benchmarks for the contracts pallet
-
 #![cfg(feature = "runtime-benchmarks")]

 mod code;
@@ -38,7 +37,7 @@ use crate::{
 #[cfg(not(feature = "std"))]
 use alloc::{vec, vec::Vec};
 use codec::{Encode, MaxEncodedLen};
-use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller};
+use frame_benchmarking::v2::*;
 use frame_support::{
 	pallet_prelude::StorageVersion,
 	traits::{fungible::InspectHold, Currency},
@@ -184,172 +183,215 @@ fn caller_funding<T: Config>() -> BalanceOf<T> {
 	BalanceOf::<T>::max_value() / 10_000u32.into()
 }

-benchmarks! {
-	where_clause { where
+#[benchmarks(
+	where
 		<BalanceOf<T> as codec::HasCompact>::Type: Clone + Eq + PartialEq + core::fmt::Debug + scale_info::TypeInfo + codec::Encode,
 		T: Config + pallet_balances::Config,
 		BalanceOf<T>: From<<pallet_balances::Pallet<T> as Currency<T::AccountId>>::Balance>,
 		<pallet_balances::Pallet<T> as Currency<T::AccountId>>::Balance: From<BalanceOf<T>>,
-	}
+)]
+mod benchmarks {
+	use super::*;

 	// The base weight consumed on processing contracts deletion queue.
-	#[pov_mode = Measured]
-	on_process_deletion_queue_batch {}: {
-		ContractInfo::<T>::process_deletion_queue_batch(Weight::MAX)
+	#[benchmark(pov_mode = Measured)]
+	fn on_process_deletion_queue_batch() {
+		#[block]
+		{
+			ContractInfo::<T>::process_deletion_queue_batch(Weight::MAX);
+		}
 	}

-	#[skip_meta]
-	#[pov_mode = Measured]
-	on_initialize_per_trie_key {
-		let k in 0..1024;
-		let instance = Contract::<T>::with_storage(WasmModule::dummy(), k, T::Schedule::get().limits.payload_len)?;
+	#[benchmark(skip_meta, pov_mode = Measured)]
+	fn on_initialize_per_trie_key(k: Linear<0, 1024>) -> Result<(), BenchmarkError> {
+		let instance = Contract::<T>::with_storage(
+			WasmModule::dummy(),
+			k,
+			T::Schedule::get().limits.payload_len,
+		)?;
 		instance.info()?.queue_trie_for_deletion();
-	}: {
-		ContractInfo::<T>::process_deletion_queue_batch(Weight::MAX)
+
+		#[block]
+		{
+			ContractInfo::<T>::process_deletion_queue_batch(Weight::MAX);
+		}
+
+		Ok(())
 	}

 	// This benchmarks the v9 migration step (update codeStorage).
-	#[pov_mode = Measured]
-	v9_migration_step {
-		let c in 0 .. T::MaxCodeLen::get();
+	#[benchmark(pov_mode = Measured)]
+	fn v9_migration_step(c: Linear<0, { T::MaxCodeLen::get() }>) {
 		v09::store_old_dummy_code::<T>(c as usize);
 		let mut m = v09::Migration::<T>::default();
-	}: {
-		m.step();
+		#[block]
+		{
+			m.step();
+		}
 	}

 	// This benchmarks the v10 migration step (use dedicated deposit_account).
-	#[pov_mode = Measured]
-	v10_migration_step {
-		let contract = <Contract<T>>::with_caller(
-			whitelisted_caller(), WasmModule::dummy(), vec![],
-		)?;
-
-		v10::store_old_contract_info::<T, pallet_balances::Pallet<T>>(contract.account_id.clone(), contract.info()?);
+	#[benchmark(pov_mode = Measured)]
+	fn v10_migration_step() -> Result<(), BenchmarkError> {
+		let contract =
+			<Contract<T>>::with_caller(whitelisted_caller(), WasmModule::dummy(), vec![])?;
+
+		v10::store_old_contract_info::<T, pallet_balances::Pallet<T>>(
+			contract.account_id.clone(),
+			contract.info()?,
+		);
 		let mut m = v10::Migration::<T, pallet_balances::Pallet<T>>::default();
-	}: {
-		m.step();
+
+		#[block]
+		{
+			m.step();
+		}
+
+		Ok(())
 	}

-	// This benchmarks the v11 migration step (Don't rely on reserved balances keeping an account alive).
-	#[pov_mode = Measured]
-	v11_migration_step {
-		let k in 0 .. 1024;
+	// This benchmarks the v11 migration step (Don't rely on reserved balances keeping an account
+	// alive).
+	#[benchmark(pov_mode = Measured)]
+	fn v11_migration_step(k: Linear<0, 1024>) {
 		v11::fill_old_queue::<T>(k as usize);
 		let mut m = v11::Migration::<T>::default();
-	}: {
-		m.step();
+
+		#[block]
+		{
+			m.step();
+		}
 	}

 	// This benchmarks the v12 migration step (Move `OwnerInfo` to `CodeInfo`,
 	// add `determinism` field to the latter, clear `CodeStorage`
 	// and repay deposits).
-	#[pov_mode = Measured]
-	v12_migration_step {
-		let c in 0 .. T::MaxCodeLen::get();
-		v12::store_old_dummy_code::<
-			T,
-			pallet_balances::Pallet<T>
-		>(c as usize, account::<T::AccountId>("account", 0, 0));
+	#[benchmark(pov_mode = Measured)]
+	fn v12_migration_step(c: Linear<0, { T::MaxCodeLen::get() }>) {
+		v12::store_old_dummy_code::<T, pallet_balances::Pallet<T>>(
+			c as usize,
+			account::<T::AccountId>("account", 0, 0),
+		);
 		let mut m = v12::Migration::<T, pallet_balances::Pallet<T>>::default();
-	}: {
-		m.step();
+
+		#[block]
+		{
+			m.step();
+		}
 	}

 	// This benchmarks the v13 migration step (Add delegate_dependencies field).
-	#[pov_mode = Measured]
-	v13_migration_step {
-		let contract = <Contract<T>>::with_caller(
-			whitelisted_caller(), WasmModule::dummy(), vec![],
-		)?;
+	#[benchmark(pov_mode = Measured)]
+	fn v13_migration_step() -> Result<(), BenchmarkError> {
+		let contract =
+			<Contract<T>>::with_caller(whitelisted_caller(), WasmModule::dummy(), vec![])?;
 		v13::store_old_contract_info::<T>(contract.account_id.clone(), contract.info()?);
 		let mut m = v13::Migration::<T>::default();
-	}: {
-		m.step();
+
+		#[block]
+		{
+			m.step();
+		}
+		Ok(())
 	}

-	// This benchmarks the v14 migration step (Move code owners' reserved balance to be held instead).
-	#[pov_mode = Measured]
-	v14_migration_step {
+	// This benchmarks the v14 migration step (Move code owners' reserved balance to be held
+	// instead).
+	#[benchmark(pov_mode = Measured)]
+	fn v14_migration_step() {
 		let account = account::<T::AccountId>("account", 0, 0);
 		T::Currency::set_balance(&account, caller_funding::<T>());
 		v14::store_dummy_code::<T, pallet_balances::Pallet<T>>(account);
 		let mut m = v14::Migration::<T, pallet_balances::Pallet<T>>::default();
-	}: {
-		m.step();
+
+		#[block]
+		{
+			m.step();
+		}
 	}

 	// This benchmarks the v15 migration step (remove deposit account).
-	#[pov_mode = Measured]
-	v15_migration_step {
-		let contract = <Contract<T>>::with_caller(
-			whitelisted_caller(), WasmModule::dummy(), vec![],
-		)?;
+	#[benchmark(pov_mode = Measured)]
+	fn v15_migration_step() -> Result<(), BenchmarkError> {
+		let contract =
+			<Contract<T>>::with_caller(whitelisted_caller(), WasmModule::dummy(), vec![])?;
 		v15::store_old_contract_info::<T>(contract.account_id.clone(), contract.info()?);
 		let mut m = v15::Migration::<T>::default();
-	}: {
-		m.step();
+
+		#[block]
+		{
+			m.step();
+		}
+
+		Ok(())
 	}

 	// This benchmarks the weight of executing Migration::migrate to execute a noop migration.
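 	// Illustrative sketch, not part of the upstream change: in the v2 benchmarking
 	// API that this file is being migrated to, a benchmark is a plain function in a
 	// `#[benchmarks]` module. The measured code goes inside `#[block]` (or behind
 	// `#[extrinsic_call]`), and assertions written after it take over the role of
 	// the old `verify` section. A minimal shape, assuming some pallet `Pallet<T>`:
 	//
 	//     #[benchmarks]
 	//     mod benches {
 	//         use super::*;
 	//
 	//         #[benchmark]
 	//         fn noop_step() {
 	//             #[block]
 	//             {
 	//                 // code whose weight is measured
 	//             }
 	//             // post-conditions go here, like the old `verify` block
 	//         }
 	//     }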
-	#[pov_mode = Measured]
-	migration_noop {
+	#[benchmark(pov_mode = Measured)]
+	fn migration_noop() {
 		let version = LATEST_MIGRATION_VERSION;
 		assert_eq!(StorageVersion::get::<Pallet<T>>(), version);
-	}: {
-		Migration::<T, false>::migrate(Weight::MAX)
-	} verify {
+		#[block]
+		{
+			Migration::<T, false>::migrate(Weight::MAX);
+		}
 		assert_eq!(StorageVersion::get::<Pallet<T>>(), version);
 	}

-	// This benchmarks the weight of dispatching migrate to execute 1 `NoopMigraton`
-	#[pov_mode = Measured]
-	migrate {
+	// This benchmarks the weight of dispatching migrate to execute 1 `NoopMigration`
+	#[benchmark(pov_mode = Measured)]
+	fn migrate() {
 		let latest_version = LATEST_MIGRATION_VERSION;
 		StorageVersion::new(latest_version - 2).put::<Pallet<T>>();
-		<Migration<T, false> as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade();
-		let caller: T::AccountId = whitelisted_caller();
-		let origin = RawOrigin::Signed(caller.clone());
-	}: _(origin, Weight::MAX)
-	verify {
+		<Migration<T, false> as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade();
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(whitelisted_caller()), Weight::MAX);
+
 		assert_eq!(StorageVersion::get::<Pallet<T>>(), latest_version - 1);
 	}

-	// This benchmarks the weight of running on_runtime_upgrade when there are no migration in progress.
-	#[pov_mode = Measured]
-	on_runtime_upgrade_noop {
+	// This benchmarks the weight of running on_runtime_upgrade when there are no migration in
+	// progress.
+	#[benchmark(pov_mode = Measured)]
+	fn on_runtime_upgrade_noop() {
 		let latest_version = LATEST_MIGRATION_VERSION;
 		assert_eq!(StorageVersion::get::<Pallet<T>>(), latest_version);
-	}: {
-		<Migration<T, false> as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade()
-	} verify {
+		#[block]
+		{
+			<Migration<T, false> as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade();
+		}
 		assert!(MigrationInProgress::<T>::get().is_none());
 	}

-	// This benchmarks the weight of running on_runtime_upgrade when there is a migration in progress.
-	#[pov_mode = Measured]
-	on_runtime_upgrade_in_progress {
+	// This benchmarks the weight of running on_runtime_upgrade when there is a migration in
+	// progress.
+	#[benchmark(pov_mode = Measured)]
+	fn on_runtime_upgrade_in_progress() {
 		let latest_version = LATEST_MIGRATION_VERSION;
 		StorageVersion::new(latest_version - 2).put::<Pallet<T>>();
 		let v = vec![42u8].try_into().ok();
 		MigrationInProgress::<T>::set(v.clone());
-	}: {
-		<Migration<T, false> as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade()
-	} verify {
+		#[block]
+		{
+			<Migration<T, false> as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade();
+		}
 		assert!(MigrationInProgress::<T>::get().is_some());
 		assert_eq!(MigrationInProgress::<T>::get(), v);
 	}

-	// This benchmarks the weight of running on_runtime_upgrade when there is a migration to process.
-	#[pov_mode = Measured]
-	on_runtime_upgrade {
+	// This benchmarks the weight of running on_runtime_upgrade when there is a migration to
+	// process.
+	#[benchmark(pov_mode = Measured)]
+	fn on_runtime_upgrade() {
 		let latest_version = LATEST_MIGRATION_VERSION;
 		StorageVersion::new(latest_version - 2).put::<Pallet<T>>();
-	}: {
-		<Migration<T, false> as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade()
-	} verify {
+		#[block]
+		{
+			<Migration<T, false> as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade();
+		}
 		assert!(MigrationInProgress::<T>::get().is_some());
 	}

@@ -357,17 +399,25 @@ benchmarks! {
 	// the sandbox. This does **not** include the actual execution for which the gas meter
 	// is responsible. This is achieved by generating all code to the `deploy` function
 	// which is in the wasm module but not executed on `call`.
- // The results are supposed to be used as `call_with_code_per_byte(c) - call_with_code_per_byte(0)`. - #[pov_mode = Measured] - call_with_code_per_byte { - let c in 0 .. T::MaxCodeLen::get(); + // The results are supposed to be used as `call_with_code_per_byte(c) - + // call_with_code_per_byte(0)`. + #[benchmark(pov_mode = Measured)] + fn call_with_code_per_byte( + c: Linear<0, { T::MaxCodeLen::get() }>, + ) -> Result<(), BenchmarkError> { let instance = Contract::::with_caller( - whitelisted_caller(), WasmModule::sized(c, Location::Deploy, false), vec![], + whitelisted_caller(), + WasmModule::sized(c, Location::Deploy, false), + vec![], )?; let value = Pallet::::min_balance(); - let origin = RawOrigin::Signed(instance.caller.clone()); let callee = instance.addr; - }: call(origin, callee, value, Weight::MAX, None, vec![]) + + #[extrinsic_call] + call(RawOrigin::Signed(instance.caller.clone()), callee, value, Weight::MAX, None, vec![]); + + Ok(()) + } // This constructs a contract that is maximal expensive to instrument. // It creates a maximum number of metering blocks per byte. @@ -379,11 +429,12 @@ benchmarks! { // `c`: Size of the code in bytes. // `i`: Size of the input in bytes. // `s`: Size of the salt in bytes. - #[pov_mode = Measured] - instantiate_with_code { - let c in 0 .. T::MaxCodeLen::get(); - let i in 0 .. code::max_pages::() * 64 * 1024; - let s in 0 .. code::max_pages::() * 64 * 1024; + #[benchmark(pov_mode = Measured)] + fn instantiate_with_code( + c: Linear<0, { T::MaxCodeLen::get() }>, + i: Linear<0, { code::max_pages::() * 64 * 1024 }>, + s: Linear<0, { code::max_pages::() * 64 * 1024 }>, + ) { let input = vec![42u8; i as usize]; let salt = vec![42u8; s as usize]; let value = Pallet::::min_balance(); @@ -392,11 +443,14 @@ benchmarks! { let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call, false); let origin = RawOrigin::Signed(caller.clone()); let addr = Contracts::::contract_address(&caller, &hash, &input, &salt); - }: _(origin, value, Weight::MAX, None, code, input, salt) - verify { - let deposit = T::Currency::balance_on_hold(&HoldReason::StorageDepositReserve.into(), &addr); + #[extrinsic_call] + _(origin, value, Weight::MAX, None, code, input, salt); + + let deposit = + T::Currency::balance_on_hold(&HoldReason::StorageDepositReserve.into(), &addr); // uploading the code reserves some balance in the callers account - let code_deposit = T::Currency::balance_on_hold(&HoldReason::CodeUploadDepositReserve.into(), &caller); + let code_deposit = + T::Currency::balance_on_hold(&HoldReason::CodeUploadDepositReserve.into(), &caller); assert_eq!( T::Currency::balance(&caller), caller_funding::() - value - deposit - code_deposit - Pallet::::min_balance(), @@ -408,22 +462,25 @@ benchmarks! { // Instantiate uses a dummy contract constructor to measure the overhead of the instantiate. // `i`: Size of the input in bytes. // `s`: Size of the salt in bytes. - #[pov_mode = Measured] - instantiate { - let i in 0 .. code::max_pages::() * 64 * 1024; - let s in 0 .. code::max_pages::() * 64 * 1024; + #[benchmark(pov_mode = Measured)] + fn instantiate( + i: Linear<0, { code::max_pages::() * 64 * 1024 }>, + s: Linear<0, { code::max_pages::() * 64 * 1024 }>, + ) -> Result<(), BenchmarkError> { let input = vec![42u8; i as usize]; let salt = vec![42u8; s as usize]; let value = Pallet::::min_balance(); let caller = whitelisted_caller(); T::Currency::set_balance(&caller, caller_funding::()); let WasmModule { code, hash, .. 
} = WasmModule::::dummy(); - let origin = RawOrigin::Signed(caller.clone()); let addr = Contracts::::contract_address(&caller, &hash, &input, &salt); Contracts::::store_code_raw(code, caller.clone())?; - }: _(origin, value, Weight::MAX, None, hash, input, salt) - verify { - let deposit = T::Currency::balance_on_hold(&HoldReason::StorageDepositReserve.into(), &addr); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), value, Weight::MAX, None, hash, input, salt); + + let deposit = + T::Currency::balance_on_hold(&HoldReason::StorageDepositReserve.into(), &addr); // value was removed from the caller assert_eq!( T::Currency::balance(&caller), @@ -431,6 +488,8 @@ benchmarks! { ); // contract has the full value assert_eq!(T::Currency::balance(&addr), value + Pallet::::min_balance()); + + Ok(()) } // We just call a dummy contract to measure the overhead of the call extrinsic. @@ -440,19 +499,21 @@ benchmarks! { // part of `seal_input`. The costs for invoking a contract of a specific size are not part // of this benchmark because we cannot know the size of the contract when issuing a call // transaction. See `call_with_code_per_byte` for this. - #[pov_mode = Measured] - call { + #[benchmark(pov_mode = Measured)] + fn call() -> Result<(), BenchmarkError> { let data = vec![42u8; 1024]; - let instance = Contract::::with_caller( - whitelisted_caller(), WasmModule::dummy(), vec![], - )?; + let instance = + Contract::::with_caller(whitelisted_caller(), WasmModule::dummy(), vec![])?; let value = Pallet::::min_balance(); let origin = RawOrigin::Signed(instance.caller.clone()); let callee = instance.addr.clone(); let before = T::Currency::balance(&instance.account_id); - }: _(origin, callee, value, Weight::MAX, None, data) - verify { - let deposit = T::Currency::balance_on_hold(&HoldReason::StorageDepositReserve.into(), &instance.account_id); + #[extrinsic_call] + _(origin, callee, value, Weight::MAX, None, data); + let deposit = T::Currency::balance_on_hold( + &HoldReason::StorageDepositReserve.into(), + &instance.account_id, + ); // value and value transferred via call should be removed from the caller assert_eq!( T::Currency::balance(&instance.caller), @@ -462,92 +523,93 @@ benchmarks! { assert_eq!(T::Currency::balance(&instance.account_id), before + value); // contract should still exist instance.info()?; + + Ok(()) } // This constructs a contract that is maximal expensive to instrument. // It creates a maximum number of metering blocks per byte. // `c`: Size of the code in bytes. - #[pov_mode = Measured] - upload_code_determinism_enforced { - let c in 0 .. T::MaxCodeLen::get(); + #[benchmark(pov_mode = Measured)] + fn upload_code_determinism_enforced(c: Linear<0, { T::MaxCodeLen::get() }>) { let caller = whitelisted_caller(); T::Currency::set_balance(&caller, caller_funding::()); let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call, false); let origin = RawOrigin::Signed(caller.clone()); - }: upload_code(origin, code, None, Determinism::Enforced) - verify { + #[extrinsic_call] + upload_code(origin, code, None, Determinism::Enforced); // uploading the code reserves some balance in the callers account assert!(T::Currency::total_balance_on_hold(&caller) > 0u32.into()); assert!(>::code_exists(&hash)); } - // Uploading code with [`Determinism::Relaxed`] should be more expensive than uploading code with [`Determinism::Enforced`], - // as we always try to save the code with [`Determinism::Enforced`] first. 
- #[pov_mode = Measured] - upload_code_determinism_relaxed { - let c in 0 .. T::MaxCodeLen::get(); + // Uploading code with [`Determinism::Relaxed`] should be more expensive than uploading code + // with [`Determinism::Enforced`], as we always try to save the code with + // [`Determinism::Enforced`] first. + #[benchmark(pov_mode = Measured)] + fn upload_code_determinism_relaxed(c: Linear<0, { T::MaxCodeLen::get() }>) { let caller = whitelisted_caller(); T::Currency::set_balance(&caller, caller_funding::()); let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call, true); let origin = RawOrigin::Signed(caller.clone()); - }: upload_code(origin, code, None, Determinism::Relaxed) - verify { + #[extrinsic_call] + upload_code(origin, code, None, Determinism::Relaxed); assert!(T::Currency::total_balance_on_hold(&caller) > 0u32.into()); assert!(>::code_exists(&hash)); - // Ensure that the benchmark follows the most expensive path, i.e., the code is saved with [`Determinism::Relaxed`] after trying to save it with [`Determinism::Enforced`]. + // Ensure that the benchmark follows the most expensive path, i.e., the code is saved with assert_eq!(CodeInfoOf::::get(&hash).unwrap().determinism(), Determinism::Relaxed); } // Removing code does not depend on the size of the contract because all the information // needed to verify the removal claim (refcount, owner) is stored in a separate storage // item (`CodeInfoOf`). - #[pov_mode = Measured] - remove_code { + #[benchmark(pov_mode = Measured)] + fn remove_code() -> Result<(), BenchmarkError> { let caller = whitelisted_caller(); T::Currency::set_balance(&caller, caller_funding::()); let WasmModule { code, hash, .. } = WasmModule::::dummy(); let origin = RawOrigin::Signed(caller.clone()); - let uploaded = >::bare_upload_code(caller.clone(), code, None, Determinism::Enforced)?; + let uploaded = + >::bare_upload_code(caller.clone(), code, None, Determinism::Enforced)?; assert_eq!(uploaded.code_hash, hash); assert_eq!(uploaded.deposit, T::Currency::total_balance_on_hold(&caller)); assert!(>::code_exists(&hash)); - }: _(origin, hash) - verify { + #[extrinsic_call] + _(origin, hash); // removing the code should have unreserved the deposit assert_eq!(T::Currency::total_balance_on_hold(&caller), 0u32.into()); assert!(>::code_removed(&hash)); + Ok(()) } - #[pov_mode = Measured] - set_code { - let instance = >::with_caller( - whitelisted_caller(), WasmModule::dummy(), vec![], - )?; + #[benchmark(pov_mode = Measured)] + fn set_code() -> Result<(), BenchmarkError> { + let instance = + >::with_caller(whitelisted_caller(), WasmModule::dummy(), vec![])?; // we just add some bytes so that the code hash is different let WasmModule { code, hash, .. } = >::dummy_with_bytes(128); >::store_code_raw(code, instance.caller.clone())?; let callee = instance.addr.clone(); assert_ne!(instance.info()?.code_hash, hash); - }: _(RawOrigin::Root, callee, hash) - verify { + #[extrinsic_call] + _(RawOrigin::Root, callee, hash); assert_eq!(instance.info()?.code_hash, hash); + Ok(()) } - #[pov_mode = Measured] - seal_caller { - let r in 0 .. 
API_BENCHMARK_RUNS; - let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_caller", r - ), vec![])?; + #[benchmark(pov_mode = Measured)] + fn seal_caller(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let instance = Contract::::new(WasmModule::getter("seal0", "seal_caller", r), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - #[pov_mode = Measured] - seal_is_contract { - let r in 0 .. API_BENCHMARK_RUNS; - let accounts = (0 .. r) - .map(|n| account::("account", n, 0)) - .collect::>(); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn seal_is_contract(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let accounts = (0..r).map(|n| account::("account", n, 0)).collect::>(); let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); let accounts_bytes = accounts.iter().flat_map(|a| a.encode()).collect::>(); let code = WasmModule::::from(ModuleDefinition { @@ -558,18 +620,16 @@ benchmarks! { params: vec![ValueType::I32], return_type: Some(ValueType::I32), }], - data_segments: vec![ - DataSegment { - offset: 0, - value: accounts_bytes - }, - ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(0, account_len as u32), // address_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. Default::default() + data_segments: vec![DataSegment { offset: 0, value: accounts_bytes }], + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(0, account_len as u32), // address_ptr + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; @@ -578,18 +638,17 @@ benchmarks! { >::insert(acc, info.clone()); } let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - #[pov_mode = Measured] - seal_code_hash { - let r in 0 .. API_BENCHMARK_RUNS; - let accounts = (0 .. r) - .map(|n| account::("account", n, 0)) - .collect::>(); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn seal_code_hash(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let accounts = (0..r).map(|n| account::("account", n, 0)).collect::>(); let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); let accounts_bytes = accounts.iter().flat_map(|a| a.encode()).collect::>(); - let accounts_len = accounts_bytes.len(); - let pages = code::max_pages::(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -603,19 +662,19 @@ benchmarks! { offset: 0, value: 32u32.to_le_bytes().to_vec(), // output length }, - DataSegment { - offset: 36, - value: accounts_bytes, - }, + DataSegment { offset: 36, value: accounts_bytes }, ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(36, account_len as u32), // address_ptr - Regular(Instruction::I32Const(4)), // ptr to output data - Regular(Instruction::I32Const(0)), // ptr to output length - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. 
Default::default() + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(36, account_len as u32), // address_ptr + Regular(Instruction::I32Const(4)), // ptr to output data + Regular(Instruction::I32Const(0)), // ptr to output length + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; @@ -624,20 +683,23 @@ benchmarks! { >::insert(acc, info.clone()); } let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - - #[pov_mode = Measured] - seal_own_code_hash { - let r in 0 .. API_BENCHMARK_RUNS; - let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_own_code_hash", r - ), vec![])?; + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn seal_own_code_hash(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let instance = + Contract::::new(WasmModule::getter("seal0", "seal_own_code_hash", r), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[pov_mode = Measured] - seal_caller_is_origin { - let r in 0 .. API_BENCHMARK_RUNS; + #[benchmark(pov_mode = Measured)] + fn seal_caller_is_origin(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -646,20 +708,18 @@ benchmarks! { params: vec![], return_type: Some(ValueType::I32), }], - call_body: Some(body::repeated(r, &[ - Instruction::Call(0), - Instruction::Drop, - ])), - .. Default::default() + call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - - #[pov_mode = Measured] - seal_caller_is_root { - let r in 0 .. API_BENCHMARK_RUNS; + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } + #[benchmark(pov_mode = Measured)] + fn seal_caller_is_root(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -668,82 +728,84 @@ benchmarks! { params: vec![], return_type: Some(ValueType::I32), }], - call_body: Some(body::repeated(r, &[ - Instruction::Call(0), - Instruction::Drop, - ])), - .. Default::default() + call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Root; - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - - #[pov_mode = Measured] - seal_address { - let r in 0 .. 
API_BENCHMARK_RUNS; - let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_address", r - ), vec![])?; + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn seal_address(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let instance = Contract::::new(WasmModule::getter("seal0", "seal_address", r), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - - #[pov_mode = Measured] - seal_gas_left { - let r in 0 .. API_BENCHMARK_RUNS; - let instance = Contract::::new(WasmModule::getter( - "seal1", "gas_left", r - ), vec![])?; + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn seal_gas_left(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let instance = Contract::::new(WasmModule::getter("seal1", "gas_left", r), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - - #[pov_mode = Measured] - seal_balance { - let r in 0 .. API_BENCHMARK_RUNS; - let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_balance", r - ), vec![])?; + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn seal_balance(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let instance = Contract::::new(WasmModule::getter("seal0", "seal_balance", r), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - - #[pov_mode = Measured] - seal_value_transferred { - let r in 0 .. API_BENCHMARK_RUNS; - let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_value_transferred", r - ), vec![])?; + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn seal_value_transferred(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let instance = + Contract::::new(WasmModule::getter("seal0", "seal_value_transferred", r), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - - #[pov_mode = Measured] - seal_minimum_balance { - let r in 0 .. API_BENCHMARK_RUNS; - let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_minimum_balance", r - ), vec![])?; + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn seal_minimum_balance(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let instance = + Contract::::new(WasmModule::getter("seal0", "seal_minimum_balance", r), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - - #[pov_mode = Measured] - seal_block_number { - let r in 0 .. 
API_BENCHMARK_RUNS; - let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_block_number", r - ), vec![])?; + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn seal_block_number(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let instance = + Contract::::new(WasmModule::getter("seal0", "seal_block_number", r), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - - #[pov_mode = Measured] - seal_now { - let r in 0 .. API_BENCHMARK_RUNS; - let instance = Contract::::new(WasmModule::getter( - "seal0", "seal_now", r - ), vec![])?; + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn seal_now(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let instance = Contract::::new(WasmModule::getter("seal0", "seal_now", r), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[pov_mode = Measured] - seal_weight_to_fee { - let r in 0 .. API_BENCHMARK_RUNS; + #[benchmark(pov_mode = Measured)] + fn seal_weight_to_fee(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { let pages = code::max_pages::(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), @@ -757,22 +819,27 @@ benchmarks! { offset: 0, value: (pages * 64 * 1024 - 4).to_le_bytes().to_vec(), }], - call_body: Some(body::repeated(r, &[ - Instruction::I64Const(500_000), - Instruction::I64Const(300_000), - Instruction::I32Const(4), - Instruction::I32Const(0), - Instruction::Call(0), - ])), - .. Default::default() + call_body: Some(body::repeated( + r, + &[ + Instruction::I64Const(500_000), + Instruction::I64Const(300_000), + Instruction::I32Const(4), + Instruction::I32Const(0), + Instruction::Call(0), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[pov_mode = Measured] - seal_input { - let r in 0 .. API_BENCHMARK_RUNS; + #[benchmark(pov_mode = Measured)] + fn seal_input(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -781,26 +848,28 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32], return_type: None, }], - data_segments: vec![ - DataSegment { - offset: 0, - value: 0u32.to_le_bytes().to_vec(), - }, - ], - call_body: Some(body::repeated(r, &[ - Instruction::I32Const(4), // ptr where to store output - Instruction::I32Const(0), // ptr to length - Instruction::Call(0), - ])), - .. 
Default::default() + data_segments: vec![DataSegment { offset: 0, value: 0u32.to_le_bytes().to_vec() }], + call_body: Some(body::repeated( + r, + &[ + Instruction::I32Const(4), // ptr where to store output + Instruction::I32Const(0), // ptr to length + Instruction::Call(0), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[pov_mode = Measured] - seal_input_per_byte { - let n in 0 .. code::max_pages::() * 64 * 1024; + #[benchmark(pov_mode = Measured)] + fn seal_input_per_byte( + n: Linear<0, { code::max_pages::() * 64 * 1024 }>, + ) -> Result<(), BenchmarkError> { let buffer_size = code::max_pages::() * 64 * 1024 - 4; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), @@ -810,31 +879,31 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32], return_type: None, }], - data_segments: vec![ - DataSegment { - offset: 0, - value: buffer_size.to_le_bytes().to_vec(), - }, - ], + data_segments: vec![DataSegment { + offset: 0, + value: buffer_size.to_le_bytes().to_vec(), + }], call_body: Some(body::plain(vec![ Instruction::I32Const(4), // ptr where to store output Instruction::I32Const(0), // ptr to length Instruction::Call(0), Instruction::End, ])), - .. Default::default() + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let data = vec![42u8; n.min(buffer_size) as usize]; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, data) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, data); + Ok(()) + } // We cannot call `seal_return` multiple times. Therefore our weight determination is not // as precise as with other APIs. Because this function can only be called once per // contract it cannot be used as an attack vector. - #[pov_mode = Measured] - seal_return { - let r in 0 .. 1; + #[benchmark(pov_mode = Measured)] + fn seal_return(r: Linear<0, 1>) -> Result<(), BenchmarkError> { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -843,21 +912,28 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, }], - call_body: Some(body::repeated(r, &[ - Instruction::I32Const(0), // flags - Instruction::I32Const(0), // data_ptr - Instruction::I32Const(0), // data_len - Instruction::Call(0), - ])), - .. Default::default() + call_body: Some(body::repeated( + r, + &[ + Instruction::I32Const(0), // flags + Instruction::I32Const(0), // data_ptr + Instruction::I32Const(0), // data_len + Instruction::Call(0), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[pov_mode = Measured] - seal_return_per_byte { - let n in 0 .. 
code::max_pages::() * 64 * 1024; + #[benchmark(pov_mode = Measured)] + fn seal_return_per_byte( + n: Linear<0, { code::max_pages::() * 64 * 1024 }>, + ) -> Result<(), BenchmarkError> { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -867,22 +943,24 @@ benchmarks! { return_type: None, }], call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // flags - Instruction::I32Const(0), // data_ptr + Instruction::I32Const(0), // flags + Instruction::I32Const(0), // data_ptr Instruction::I32Const(n as i32), // data_len Instruction::Call(0), Instruction::End, ])), - .. Default::default() + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // The same argument as for `seal_return` is true here. - #[pov_mode = Measured] - seal_terminate { - let r in 0 .. 1; + #[benchmark(pov_mode = Measured)] + fn seal_terminate(r: Linear<0, 1>) -> Result<(), BenchmarkError> { let beneficiary = account::("beneficiary", 0, 0); let beneficiary_bytes = beneficiary.encode(); let beneficiary_len = beneficiary_bytes.len(); @@ -915,50 +993,74 @@ benchmarks! { name: "lock_delegate_dependency", params: vec![ValueType::I32], return_type: None, - } + }, ], data_segments: vec![ - DataSegment { - offset: 0, - value: beneficiary_bytes, - }, - DataSegment { - offset: beneficiary_len as u32, - value: code_hashes_bytes, - }, + DataSegment { offset: 0, value: beneficiary_bytes }, + DataSegment { offset: beneficiary_len as u32, value: code_hashes_bytes }, ], - deploy_body: Some(body::repeated_dyn(T::MaxDelegateDependencies::get(), vec![ - Counter(beneficiary_len as u32, code_hash_len as u32), // code_hash_ptr - Regular(Instruction::Call(1)), - ])), - call_body: Some(body::repeated(r, &[ - Instruction::I32Const(0), // beneficiary_ptr - Instruction::I32Const(beneficiary_len as i32), // beneficiary_len - Instruction::Call(0), - ])), - .. 
Default::default() + deploy_body: Some(body::repeated_dyn( + T::MaxDelegateDependencies::get(), + vec![ + Counter(beneficiary_len as u32, code_hash_len as u32), // code_hash_ptr + Regular(Instruction::Call(1)), + ], + )), + call_body: Some(body::repeated( + r, + &[ + Instruction::I32Const(0), // beneficiary_ptr + Instruction::I32Const(beneficiary_len as i32), // beneficiary_len + Instruction::Call(0), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); assert_eq!(T::Currency::total_balance(&beneficiary), 0u32.into()); - assert_eq!(T::Currency::balance(&instance.account_id), Pallet::::min_balance() * 2u32.into()); - assert_ne!(T::Currency::balance_on_hold(&HoldReason::StorageDepositReserve.into(), &instance.account_id), 0u32.into()); - assert_eq!(ContractInfoOf::::get(&instance.account_id).unwrap().delegate_dependencies_count() as u32, T::MaxDelegateDependencies::get()); - }: call(origin, instance.addr.clone(), 0u32.into(), Weight::MAX, None, vec![]) - verify { + assert_eq!( + T::Currency::balance(&instance.account_id), + Pallet::::min_balance() * 2u32.into() + ); + assert_ne!( + T::Currency::balance_on_hold( + &HoldReason::StorageDepositReserve.into(), + &instance.account_id + ), + 0u32.into() + ); + assert_eq!( + ContractInfoOf::::get(&instance.account_id) + .unwrap() + .delegate_dependencies_count() as u32, + T::MaxDelegateDependencies::get() + ); + #[extrinsic_call] + call(origin, instance.addr.clone(), 0u32.into(), Weight::MAX, None, vec![]); + if r > 0 { assert_eq!(T::Currency::total_balance(&instance.account_id), 0u32.into()); - assert_eq!(T::Currency::balance_on_hold(&HoldReason::StorageDepositReserve.into(), &instance.account_id), 0u32.into()); - assert_eq!(T::Currency::total_balance(&beneficiary), Pallet::::min_balance() * 2u32.into()); + assert_eq!( + T::Currency::balance_on_hold( + &HoldReason::StorageDepositReserve.into(), + &instance.account_id + ), + 0u32.into() + ); + assert_eq!( + T::Currency::total_balance(&beneficiary), + Pallet::::min_balance() * 2u32.into() + ); } + Ok(()) } // We benchmark only for the maximum subject length. We assume that this is some lowish // number (< 1 KB). Therefore we are not overcharging too much in case a smaller subject is // used. - #[pov_mode = Measured] - seal_random { - let r in 0 .. API_BENCHMARK_RUNS; + #[benchmark(pov_mode = Measured)] + fn seal_random(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { let pages = code::max_pages::(); let subject_len = T::Schedule::get().limits.subject_len; assert!(subject_len < 1024); @@ -970,30 +1072,33 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, }], - data_segments: vec![ - DataSegment { - offset: 0, - value: (pages * 64 * 1024 - subject_len - 4).to_le_bytes().to_vec(), - }, - ], - call_body: Some(body::repeated(r, &[ - Instruction::I32Const(4), // subject_ptr - Instruction::I32Const(subject_len as i32), // subject_len - Instruction::I32Const((subject_len + 4) as i32), // out_ptr - Instruction::I32Const(0), // out_len_ptr - Instruction::Call(0), - ])), - .. 
Default::default() + data_segments: vec![DataSegment { + offset: 0, + value: (pages * 64 * 1024 - subject_len - 4).to_le_bytes().to_vec(), + }], + call_body: Some(body::repeated( + r, + &[ + Instruction::I32Const(4), // subject_ptr + Instruction::I32Const(subject_len as i32), // subject_len + Instruction::I32Const((subject_len + 4) as i32), // out_ptr + Instruction::I32Const(0), // out_len_ptr + Instruction::Call(0), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // Overhead of calling the function without any topic. // We benchmark for the worst case (largest event). - #[pov_mode = Measured] - seal_deposit_event { - let r in 0 .. API_BENCHMARK_RUNS; + #[benchmark(pov_mode = Measured)] + fn seal_deposit_event(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1002,26 +1107,33 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, }], - call_body: Some(body::repeated(r, &[ - Instruction::I32Const(0), // topics_ptr - Instruction::I32Const(0), // topics_len - Instruction::I32Const(0), // data_ptr - Instruction::I32Const(0), // data_len - Instruction::Call(0), - ])), - .. Default::default() + call_body: Some(body::repeated( + r, + &[ + Instruction::I32Const(0), // topics_ptr + Instruction::I32Const(0), // topics_len + Instruction::I32Const(0), // data_ptr + Instruction::I32Const(0), // data_len + Instruction::Call(0), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // Benchmark the overhead that topics generate. // `t`: Number of topics // `n`: Size of event payload in bytes - #[pov_mode = Measured] - seal_deposit_event_per_topic_and_byte { - let t in 0 .. T::Schedule::get().limits.event_topics; - let n in 0 .. T::Schedule::get().limits.payload_len; + #[benchmark(pov_mode = Measured)] + fn seal_deposit_event_per_topic_and_byte( + t: Linear<0, { T::Schedule::get().limits.event_topics }>, + n: Linear<0, { T::Schedule::get().limits.payload_len }>, + ) -> Result<(), BenchmarkError> { let topics = (0..t).map(|i| T::Hashing::hash_of(&i)).collect::>().encode(); let topics_len = topics.len(); let code = WasmModule::::from(ModuleDefinition { @@ -1032,32 +1144,29 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, }], - data_segments: vec![ - DataSegment { - offset: 0, - value: topics, - }, - ], + data_segments: vec![DataSegment { offset: 0, value: topics }], call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // topics_ptr + Instruction::I32Const(0), // topics_ptr Instruction::I32Const(topics_len as i32), // topics_len - Instruction::I32Const(0), // data_ptr - Instruction::I32Const(n as i32), // data_len + Instruction::I32Const(0), // data_ptr + Instruction::I32Const(n as i32), // data_len Instruction::Call(0), Instruction::End, ])), - .. 
Default::default() + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // Benchmark debug_message call with zero input data. // Whereas this function is used in RPC mode only, it still should be secured // against an excessive use. - #[pov_mode = Measured] - seal_debug_message { - let r in 0 .. API_BENCHMARK_RUNS; + #[benchmark(pov_mode = Measured)] + fn seal_debug_message(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory { min_pages: 1, max_pages: 1 }), imported_functions: vec![ImportedFunction { @@ -1066,38 +1175,56 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], - call_body: Some(body::repeated(r, &[ - Instruction::I32Const(0), // value_ptr - Instruction::I32Const(0), // value_len - Instruction::Call(0), - Instruction::Drop, - ])), - .. Default::default() + call_body: Some(body::repeated( + r, + &[ + Instruction::I32Const(0), // value_ptr + Instruction::I32Const(0), // value_len + Instruction::Call(0), + Instruction::Drop, + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; - }: { - >::bare_call( - instance.caller, - instance.account_id, - 0u32.into(), - Weight::MAX, - None, - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result?; + + #[block] + { + >::bare_call( + instance.caller, + instance.account_id, + 0u32.into(), + Weight::MAX, + None, + vec![], + DebugInfo::UnsafeDebug, + CollectEvents::Skip, + Determinism::Enforced, + ) + .result?; + } + Ok(()) } - seal_debug_message_per_byte { - // Vary size of input in bytes up to maximum allowed contract memory - // or maximum allowed debug buffer size, whichever is less. - let i in 0 .. (T::Schedule::get().limits.memory_pages * 64 * 1024).min(T::MaxDebugBufferLen::get()); + // Vary size of input in bytes up to maximum allowed contract memory + // or maximum allowed debug buffer size, whichever is less. + #[benchmark] + fn seal_debug_message_per_byte( + i: Linear< + 0, + { + (T::Schedule::get().limits.memory_pages * 64 * 1024) + .min(T::MaxDebugBufferLen::get()) + }, + >, + ) -> Result<(), BenchmarkError> { // We benchmark versus messages containing printable ASCII codes. // About 1Kb goes to the contract code instructions, // whereas all the space left we use for the initialization of the debug messages data. - let message = (0 .. T::MaxCodeLen::get() - 1024).zip((32..127).cycle()).map(|i| i.1).collect::>(); + let message = (0..T::MaxCodeLen::get() - 1024) + .zip((32..127).cycle()) + .map(|i| i.1) + .collect::>(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory { min_pages: T::Schedule::get().limits.memory_pages, @@ -1108,15 +1235,10 @@ benchmarks! 
{ name: "seal_debug_message", params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { - offset: 0, - value: message, - }, - ], + }], + data_segments: vec![DataSegment { offset: 0, value: message }], call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // value_ptr + Instruction::I32Const(0), // value_ptr Instruction::I32Const(i as i32), // value_len Instruction::Call(0), Instruction::Drop, @@ -1125,22 +1247,25 @@ benchmarks! { ..Default::default() }); let instance = Contract::::new(code, vec![])?; - }: { - >::bare_call( - instance.caller, - instance.account_id, - 0u32.into(), - Weight::MAX, - None, - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result?; - } - - // Only the overhead of calling the function itself with minimal arguments. + #[block] + { + >::bare_call( + instance.caller, + instance.account_id, + 0u32.into(), + Weight::MAX, + None, + vec![], + DebugInfo::UnsafeDebug, + CollectEvents::Skip, + Determinism::Enforced, + ) + .result?; + } + Ok(()) + } + + // Only the overhead of calling the function itself with minimal arguments. // The contract is a bit more complex because it needs to use different keys in order // to generate unique storage accesses. However, it is still dominated by the storage // accesses. We store something at all the keys that we are about to write to @@ -1151,15 +1276,16 @@ benchmarks! { // // We need to use a smaller `r` because the keys are big and writing them all into the wasm // might exceed the code size. - #[skip_meta] - #[pov_mode = Measured] - seal_set_storage { - let r in 0 .. API_BENCHMARK_RUNS/2; + #[benchmark(skip_meta, pov_mode = Measured)] + fn seal_set_storage(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. r) - .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) - .collect::>(); + let keys = (0..r) + .map(|n| { + let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); + h + }) + .collect::>(); let keys_bytes = keys.iter().flatten().cloned().collect::>(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), @@ -1169,27 +1295,25 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], - data_segments: vec![ - DataSegment { - offset: 0, - value: keys_bytes, - }, - ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const(0)), // value_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. 
Default::default() + data_segments: vec![DataSegment { offset: 0, value: keys_bytes }], + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len + Regular(Instruction::I32Const(0)), // value_ptr + Regular(Instruction::I32Const(0)), // value_len + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; for key in keys { info.write( - &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, Some(vec![]), None, false, @@ -1197,12 +1321,15 @@ benchmarks! { .map_err(|_| "Failed to write to storage during setup.")?; } let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[skip_meta] - #[pov_mode = Measured] - seal_set_storage_per_new_byte { - let n in 0 .. T::Schedule::get().limits.payload_len; + #[benchmark(skip_meta, pov_mode = Measured)] + fn seal_set_storage_per_new_byte( + n: Linear<0, { T::Schedule::get().limits.payload_len }>, + ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); let key = vec![0u8; max_key_len as usize]; let code = WasmModule::::from(ModuleDefinition { @@ -1213,39 +1340,37 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], - data_segments: vec![ - DataSegment { - offset: 0, - value: key.clone(), - }, - ], + data_segments: vec![DataSegment { offset: 0, value: key.clone() }], call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr + Instruction::I32Const(0), // key_ptr Instruction::I32Const(max_key_len as i32), // key_len - Instruction::I32Const(0), // value_ptr - Instruction::I32Const(n as i32), // value_len + Instruction::I32Const(0), // value_ptr + Instruction::I32Const(n as i32), // value_len Instruction::Call(0), Instruction::Drop, Instruction::End, ])), - .. Default::default() + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; info.write( - &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, Some(vec![]), None, false, ) .map_err(|_| "Failed to write to storage during setup.")?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[skip_meta] - #[pov_mode = Measured] - seal_set_storage_per_old_byte { - let n in 0 .. T::Schedule::get().limits.payload_len; + #[benchmark(skip_meta, pov_mode = Measured)] + fn seal_set_storage_per_old_byte( + n: Linear<0, { T::Schedule::get().limits.payload_len }>, + ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); let key = vec![0u8; max_key_len as usize]; let code = WasmModule::::from(ModuleDefinition { @@ -1256,48 +1381,48 @@ benchmarks! 
{ params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], - data_segments: vec![ - DataSegment { - offset: 0, - value: key.clone(), - }, - ], + data_segments: vec![DataSegment { offset: 0, value: key.clone() }], call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr + Instruction::I32Const(0), // key_ptr Instruction::I32Const(max_key_len as i32), // key_len - Instruction::I32Const(0), // value_ptr - Instruction::I32Const(0), // value_len is 0 as testing vs pre-existing value len + Instruction::I32Const(0), // value_ptr + Instruction::I32Const(0), /* value_len is 0 as testing vs + * pre-existing value len */ Instruction::Call(0), Instruction::Drop, Instruction::End, ])), - .. Default::default() + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; info.write( - &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, Some(vec![42u8; n as usize]), None, false, ) .map_err(|_| "Failed to write to storage during setup.")?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // Similar to seal_set_storage. We store all the keys that we are about to // delete beforehand in order to prevent any optimizations that could occur when // deleting a non existing key. We generate keys of a maximum length, and have to // the amount of runs in order to make resulting contract code size less than MaxCodeLen. - #[skip_meta] - #[pov_mode = Measured] - seal_clear_storage { - let r in 0 .. API_BENCHMARK_RUNS/2; + #[benchmark(skip_meta, pov_mode = Measured)] + fn seal_clear_storage(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. r) - .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) - .collect::>(); + let keys = (0..r) + .map(|n| { + let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); + h + }) + .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), @@ -1307,25 +1432,23 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], - data_segments: vec![ - DataSegment { - offset: 0, - value: key_bytes, - }, - ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. 
Default::default() + data_segments: vec![DataSegment { offset: 0, value: key_bytes }], + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; for key in keys { info.write( - &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, Some(vec![]), None, false, @@ -1334,12 +1457,15 @@ benchmarks! { } >::insert(&instance.account_id, info); let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[skip_meta] - #[pov_mode = Measured] - seal_clear_storage_per_byte { - let n in 0 .. T::Schedule::get().limits.payload_len; + #[benchmark(skip_meta, pov_mode = Measured)] + fn seal_clear_storage_per_byte( + n: Linear<0, { T::Schedule::get().limits.payload_len }>, + ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); let key = vec![0u8; max_key_len as usize]; let code = WasmModule::::from(ModuleDefinition { @@ -1350,43 +1476,42 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], - data_segments: vec![ - DataSegment { - offset: 0, - value: key.clone(), - }, - ], + data_segments: vec![DataSegment { offset: 0, value: key.clone() }], call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr + Instruction::I32Const(0), // key_ptr Instruction::I32Const(max_key_len as i32), // key_len Instruction::Call(0), Instruction::Drop, Instruction::End, ])), - .. Default::default() + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; info.write( - &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, Some(vec![42u8; n as usize]), None, false, ) .map_err(|_| "Failed to write to storage during setup.")?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // We make sure that all storage accesses are to unique keys. - #[skip_meta] - #[pov_mode = Measured] - seal_get_storage { - let r in 0 .. API_BENCHMARK_RUNS/2; + #[benchmark(skip_meta, pov_mode = Measured)] + fn seal_get_storage(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. r) - .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) - .collect::>(); + let keys = (0..r) + .map(|n| { + let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); + h + }) + .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let key_bytes_len = key_bytes.len(); let code = WasmModule::::from(ModuleDefinition { @@ -1398,30 +1523,30 @@ benchmarks! 
{ return_type: Some(ValueType::I32), }], data_segments: vec![ - DataSegment { - offset: 0, - value: key_bytes, - }, + DataSegment { offset: 0, value: key_bytes }, DataSegment { offset: key_bytes_len as u32, value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), }, ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(0, max_key_len), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr - Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. Default::default() + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(0, max_key_len), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len + Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr + Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; for key in keys { info.write( - &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, Some(vec![]), None, false, @@ -1430,12 +1555,15 @@ benchmarks! { } >::insert(&instance.account_id, info); let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[skip_meta] - #[pov_mode = Measured] - seal_get_storage_per_byte { - let n in 0 .. T::Schedule::get().limits.payload_len; + #[benchmark(skip_meta, pov_mode = Measured)] + fn seal_get_storage_per_byte( + n: Linear<0, { T::Schedule::get().limits.payload_len }>, + ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); let key = vec![0u8; max_key_len as usize]; let code = WasmModule::::from(ModuleDefinition { @@ -1447,30 +1575,27 @@ benchmarks! { return_type: Some(ValueType::I32), }], data_segments: vec![ - DataSegment { - offset: 0, - value: key.clone(), - }, + DataSegment { offset: 0, value: key.clone() }, DataSegment { offset: max_key_len, value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), }, ], call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr - Instruction::I32Const(max_key_len as i32), // key_len + Instruction::I32Const(0), // key_ptr + Instruction::I32Const(max_key_len as i32), // key_len Instruction::I32Const((max_key_len + 4) as i32), // out_ptr - Instruction::I32Const(max_key_len as i32), // out_len_ptr + Instruction::I32Const(max_key_len as i32), // out_len_ptr Instruction::Call(0), Instruction::Drop, Instruction::End, ])), - .. Default::default() + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; info.write( - &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, Some(vec![42u8; n as usize]), None, false, @@ -1478,20 +1603,25 @@ benchmarks! 
{ .map_err(|_| "Failed to write to storage during setup.")?; >::insert(&instance.account_id, info); let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // We make sure that all storage accesses are to unique keys. - #[skip_meta] - #[pov_mode = Measured] - seal_contains_storage { - let r in 0 .. API_BENCHMARK_RUNS/2; + #[benchmark(skip_meta, pov_mode = Measured)] + fn seal_contains_storage( + r: Linear<0, { API_BENCHMARK_RUNS / 2 }>, + ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. r) - .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) - .collect::>(); + let keys = (0..r) + .map(|n| { + let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); + h + }) + .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); - let key_bytes_len = key_bytes.len(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1500,25 +1630,23 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], - data_segments: vec![ - DataSegment { - offset: 0, - value: key_bytes, - }, - ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. Default::default() + data_segments: vec![DataSegment { offset: 0, value: key_bytes }], + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; for key in keys { info.write( - &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, Some(vec![]), None, false, @@ -1527,12 +1655,15 @@ benchmarks! { } >::insert(&instance.account_id, info); let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[skip_meta] - #[pov_mode = Measured] - seal_contains_storage_per_byte { - let n in 0 .. T::Schedule::get().limits.payload_len; + #[benchmark(skip_meta, pov_mode = Measured)] + fn seal_contains_storage_per_byte( + n: Linear<0, { T::Schedule::get().limits.payload_len }>, + ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); let key = vec![0u8; max_key_len as usize]; let code = WasmModule::::from(ModuleDefinition { @@ -1543,25 +1674,20 @@ benchmarks! 
{ params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], - data_segments: vec![ - DataSegment { - offset: 0, - value: key.clone(), - }, - ], + data_segments: vec![DataSegment { offset: 0, value: key.clone() }], call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr + Instruction::I32Const(0), // key_ptr Instruction::I32Const(max_key_len as i32), // key_len Instruction::Call(0), Instruction::Drop, Instruction::End, ])), - .. Default::default() + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; info.write( - &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, Some(vec![42u8; n as usize]), None, false, @@ -1569,17 +1695,21 @@ benchmarks! { .map_err(|_| "Failed to write to storage during setup.")?; >::insert(&instance.account_id, info); let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[skip_meta] - #[pov_mode = Measured] - seal_take_storage { - let r in 0 .. API_BENCHMARK_RUNS/2; + #[benchmark(skip_meta, pov_mode = Measured)] + fn seal_take_storage(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0 .. r) - .map(|n| { let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); h }) - .collect::>(); + let keys = (0..r) + .map(|n| { + let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); + h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); + h + }) + .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let key_bytes_len = key_bytes.len(); let code = WasmModule::::from(ModuleDefinition { @@ -1591,30 +1721,30 @@ benchmarks! { return_type: Some(ValueType::I32), }], data_segments: vec![ - DataSegment { - offset: 0, - value: key_bytes, - }, + DataSegment { offset: 0, value: key_bytes }, DataSegment { offset: key_bytes_len as u32, value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), }, ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr - Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. Default::default() + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(0, max_key_len as u32), // key_ptr + Regular(Instruction::I32Const(max_key_len as i32)), // key_len + Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr + Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; for key in keys { info.write( - &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, Some(vec![]), None, false, @@ -1623,12 +1753,15 @@ benchmarks! 
{ } >::insert(&instance.account_id, info); let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[skip_meta] - #[pov_mode = Measured] - seal_take_storage_per_byte { - let n in 0 .. T::Schedule::get().limits.payload_len; + #[benchmark(skip_meta, pov_mode = Measured)] + fn seal_take_storage_per_byte( + n: Linear<0, { T::Schedule::get().limits.payload_len }>, + ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); let key = vec![0u8; max_key_len as usize]; let code = WasmModule::::from(ModuleDefinition { @@ -1640,30 +1773,27 @@ benchmarks! { return_type: Some(ValueType::I32), }], data_segments: vec![ - DataSegment { - offset: 0, - value: key.clone(), - }, + DataSegment { offset: 0, value: key.clone() }, DataSegment { offset: max_key_len, value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), }, ], call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr - Instruction::I32Const(max_key_len as i32), // key_len + Instruction::I32Const(0), // key_ptr + Instruction::I32Const(max_key_len as i32), // key_len Instruction::I32Const((max_key_len + 4) as i32), // out_ptr - Instruction::I32Const(max_key_len as i32), // out_len_ptr + Instruction::I32Const(max_key_len as i32), // out_len_ptr Instruction::Call(0), Instruction::Drop, Instruction::End, ])), - .. Default::default() + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let info = instance.info()?; info.write( - &Key::::try_from_var(key).map_err(|e| "Key has wrong length")?, + &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, Some(vec![42u8; n as usize]), None, false, @@ -1671,15 +1801,16 @@ benchmarks! { .map_err(|_| "Failed to write to storage during setup.")?; >::insert(&instance.account_id, info); let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // We transfer to unique accounts. - #[pov_mode = Measured] - seal_transfer { - let r in 0 .. API_BENCHMARK_RUNS; - let accounts = (0..r) - .map(|i| account::("receiver", i, 0)) - .collect::>(); + #[benchmark(pov_mode = Measured)] + fn seal_transfer(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let accounts = + (0..r).map(|i| account::("receiver", i, 0)).collect::>(); let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); let account_bytes = accounts.iter().flat_map(|x| x.encode()).collect(); let value = Pallet::::min_balance(); @@ -1695,24 +1826,21 @@ benchmarks! { return_type: Some(ValueType::I32), }], data_segments: vec![ - DataSegment { - offset: 0, - value: value_bytes, - }, - DataSegment { - offset: value_len as u32, - value: account_bytes, - }, + DataSegment { offset: 0, value: value_bytes }, + DataSegment { offset: value_len as u32, value: account_bytes }, ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(value_len as u32, account_len as u32), // account_ptr - Regular(Instruction::I32Const(account_len as i32)), // account_len - Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const(value_len as i32)), // value_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. 
Default::default() + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(value_len as u32, account_len as u32), // account_ptr + Regular(Instruction::I32Const(account_len as i32)), // account_len + Regular(Instruction::I32Const(0)), // value_ptr + Regular(Instruction::I32Const(value_len as i32)), // value_len + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; instance.set_balance(value * (r + 1).into()); @@ -1720,19 +1848,20 @@ benchmarks! { for account in &accounts { assert_eq!(T::Currency::total_balance(account), 0u32.into()); } - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - verify { + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + for account in &accounts { assert_eq!(T::Currency::total_balance(account), value); } + Ok(()) } // We call unique accounts. // - // This is a slow call: We redeuce the number of runs. - #[pov_mode = Measured] - seal_call { - let r in 0 .. API_BENCHMARK_RUNS / 2; + // This is a slow call: We reduce the number of runs. + #[benchmark(pov_mode = Measured)] + fn seal_call(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { let dummy_code = WasmModule::::dummy_with_bytes(0); let callees = (0..r) .map(|i| Contract::with_index(i + 1, dummy_code.clone(), vec![])) @@ -1745,7 +1874,7 @@ benchmarks! { // Set an own limit every 2nd call let own_limit = (u32::MAX - 100).into(); let deposits = (0..r) - .map(|i| if i % 2 == 0 { 0u32.into() } else { own_limit } ) + .map(|i| if i % 2 == 0 { 0u32.into() } else { own_limit }) .collect::>>(); let deposits_bytes: Vec = deposits.iter().flat_map(|i| i.encode()).collect(); let deposits_len = deposits_bytes.len() as u32; @@ -1771,43 +1900,46 @@ benchmarks! { return_type: Some(ValueType::I32), }], data_segments: vec![ - DataSegment { - offset: 0, - value: value_bytes, - }, - DataSegment { - offset: value_len, - value: deposits_bytes, - }, - DataSegment { - offset: callee_offset, - value: callee_bytes, - }, + DataSegment { offset: 0, value: value_bytes }, + DataSegment { offset: value_len, value: deposits_bytes }, + DataSegment { offset: callee_offset, value: callee_bytes }, ], - call_body: Some(body::repeated_dyn(r, vec![ - Regular(Instruction::I32Const(0)), // flags - Counter(callee_offset, callee_len as u32), // callee_ptr - Regular(Instruction::I64Const(0)), // ref_time weight - Regular(Instruction::I64Const(0)), // proof_size weight - Counter(value_len, deposit_len as u32), // deposit_limit_ptr - Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const(0)), // input_data_ptr - Regular(Instruction::I32Const(0)), // input_data_len - Regular(Instruction::I32Const(SENTINEL as i32)), // output_ptr - Regular(Instruction::I32Const(0)), // output_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. 
Default::default() + call_body: Some(body::repeated_dyn( + r, + vec![ + Regular(Instruction::I32Const(0)), // flags + Counter(callee_offset, callee_len as u32), // callee_ptr + Regular(Instruction::I64Const(0)), // ref_time weight + Regular(Instruction::I64Const(0)), // proof_size weight + Counter(value_len, deposit_len as u32), // deposit_limit_ptr + Regular(Instruction::I32Const(0)), // value_ptr + Regular(Instruction::I32Const(0)), // input_data_ptr + Regular(Instruction::I32Const(0)), // input_data_len + Regular(Instruction::I32Const(SENTINEL as i32)), // output_ptr + Regular(Instruction::I32Const(0)), // output_len_ptr + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, Some(BalanceOf::::from(u32::MAX.into()).into()), vec![]) + #[extrinsic_call] + call( + origin, + instance.addr, + 0u32.into(), + Weight::MAX, + Some(BalanceOf::::from(u32::MAX.into()).into()), + vec![], + ); + Ok(()) + } - // This is a slow call: We redeuce the number of runs. - #[pov_mode = Measured] - seal_delegate_call { - let r in 0 .. API_BENCHMARK_RUNS / 2; + // This is a slow call: We reduce the number of runs. + #[benchmark(pov_mode = Measured)] + fn seal_delegate_call(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { let hashes = (0..r) .map(|i| { let code = WasmModule::::dummy_with_bytes(i); @@ -1819,7 +1951,6 @@ benchmarks! { .collect::, &'static str>>()?; let hash_len = hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); let hashes_bytes = hashes.iter().flat_map(|x| x.encode()).collect::>(); - let hashes_len = hashes_bytes.len(); let hashes_offset = 0; let code = WasmModule::::from(ModuleDefinition { @@ -1837,33 +1968,35 @@ benchmarks! { ], return_type: Some(ValueType::I32), }], - data_segments: vec![ - DataSegment { - offset: hashes_offset as u32, - value: hashes_bytes, - }, - ], - call_body: Some(body::repeated_dyn(r, vec![ - Regular(Instruction::I32Const(0)), // flags - Counter(hashes_offset as u32, hash_len as u32), // code_hash_ptr - Regular(Instruction::I32Const(0)), // input_data_ptr - Regular(Instruction::I32Const(0)), // input_data_len - Regular(Instruction::I32Const(u32::max_value() as i32)), // output_ptr - Regular(Instruction::I32Const(0)), // output_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. Default::default() + data_segments: vec![DataSegment { offset: hashes_offset as u32, value: hashes_bytes }], + call_body: Some(body::repeated_dyn( + r, + vec![ + Regular(Instruction::I32Const(0)), // flags + Counter(hashes_offset as u32, hash_len as u32), // code_hash_ptr + Regular(Instruction::I32Const(0)), // input_data_ptr + Regular(Instruction::I32Const(0)), // input_data_len + Regular(Instruction::I32Const(u32::max_value() as i32)), // output_ptr + Regular(Instruction::I32Const(0)), // output_len_ptr + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let callee = instance.addr.clone(); let origin = RawOrigin::Signed(instance.caller); - }: call(origin, callee, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, callee, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[pov_mode = Measured] - seal_call_per_transfer_clone_byte { - let t in 0 .. 1; - let c in 0 .. 
code::max_pages::() * 64 * 1024; + #[benchmark(pov_mode = Measured)] + fn seal_call_per_transfer_clone_byte( + t: Linear<0, { 1 }>, + c: Linear<0, { code::max_pages::() * 64 * 1024 }>, + ) -> Result<(), BenchmarkError> { let callee = Contract::with_index(5, >::dummy(), vec![])?; let value: BalanceOf = t.into(); let value_bytes = value.encode(); @@ -1886,40 +2019,36 @@ benchmarks! { return_type: Some(ValueType::I32), }], data_segments: vec![ - DataSegment { - offset: 0, - value: value_bytes, - }, - DataSegment { - offset: value_len as u32, - value: callee.account_id.encode(), - }, + DataSegment { offset: 0, value: value_bytes }, + DataSegment { offset: value_len as u32, value: callee.account_id.encode() }, ], call_body: Some(body::plain(vec![ Instruction::I32Const(CallFlags::CLONE_INPUT.bits() as i32), // flags - Instruction::I32Const(value_len as i32), // callee_ptr - Instruction::I64Const(0), // gas - Instruction::I32Const(0), // value_ptr - Instruction::I32Const(0), // input_data_ptr - Instruction::I32Const(0), // input_data_len - Instruction::I32Const(SENTINEL as i32), // output_ptr - Instruction::I32Const(0), // output_len_ptr + Instruction::I32Const(value_len as i32), // callee_ptr + Instruction::I64Const(0), // gas + Instruction::I32Const(0), // value_ptr + Instruction::I32Const(0), // input_data_ptr + Instruction::I32Const(0), // input_data_len + Instruction::I32Const(SENTINEL as i32), // output_ptr + Instruction::I32Const(0), // output_len_ptr Instruction::Call(0), Instruction::Drop, Instruction::End, ])), - .. Default::default() + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); let bytes = vec![42; c as usize]; - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, bytes) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, bytes); + Ok(()) + } // We assume that every instantiate sends at least the minimum balance. // This is a slow call: we reduce the number of runs. - #[pov_mode = Measured] - seal_instantiate { - let r in 1 .. API_BENCHMARK_RUNS / 2; + #[benchmark(pov_mode = Measured)] + fn seal_instantiate(r: Linear<1, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { let hashes = (0..r) .map(|i| { let code = WasmModule::::from(ModuleDefinition { @@ -1931,7 +2060,7 @@ benchmarks! { Instruction::Drop, Instruction::End, ])), - .. Default::default() + ..Default::default() }); let caller = whitelisted_caller(); T::Currency::set_balance(&caller, caller_funding::()); @@ -1974,37 +2103,35 @@ benchmarks! 
{ return_type: Some(ValueType::I32), }], data_segments: vec![ - DataSegment { - offset: 0, - value: value_bytes, - }, - DataSegment { - offset: hashes_offset as u32, - value: hashes_bytes, - }, + DataSegment { offset: 0, value: value_bytes }, + DataSegment { offset: hashes_offset as u32, value: hashes_bytes }, DataSegment { offset: addr_len_offset as u32, value: addr_len.to_le_bytes().into(), }, ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(hashes_offset as u32, hash_len as u32), // code_hash_ptr - Regular(Instruction::I64Const(0)), // ref_time weight - Regular(Instruction::I64Const(0)), // proof_size weight - Regular(Instruction::I32Const(SENTINEL as i32)), // deposit limit ptr: use parent's limit - Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const(0)), // input_data_ptr - Regular(Instruction::I32Const(0)), // input_data_len - Regular(Instruction::I32Const(addr_offset as i32)), // address_ptr - Regular(Instruction::I32Const(addr_len_offset as i32)), // address_len_ptr - Regular(Instruction::I32Const(SENTINEL as i32)), // output_ptr - Regular(Instruction::I32Const(0)), // output_len_ptr - Regular(Instruction::I32Const(0)), // salt_ptr - Regular(Instruction::I32Const(0)), // salt_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. Default::default() + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(hashes_offset as u32, hash_len as u32), // code_hash_ptr + Regular(Instruction::I64Const(0)), // ref_time weight + Regular(Instruction::I64Const(0)), // proof_size weight + Regular(Instruction::I32Const(SENTINEL as i32)), /* deposit limit ptr: use + * parent's limit */ + Regular(Instruction::I32Const(0)), // value_ptr + Regular(Instruction::I32Const(0)), // input_data_ptr + Regular(Instruction::I32Const(0)), // input_data_len + Regular(Instruction::I32Const(addr_offset as i32)), // address_ptr + Regular(Instruction::I32Const(addr_len_offset as i32)), // address_len_ptr + Regular(Instruction::I32Const(SENTINEL as i32)), // output_ptr + Regular(Instruction::I32Const(0)), // output_len_ptr + Regular(Instruction::I32Const(0)), // salt_ptr + Regular(Instruction::I32Const(0)), // salt_len_ptr + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; instance.set_balance((value + Pallet::::min_balance()) * (r + 1).into()); @@ -2012,9 +2139,7 @@ benchmarks! { let callee = instance.addr.clone(); let addresses = hashes .iter() - .map(|hash| Contracts::::contract_address( - &instance.account_id, hash, &[], &[], - )) + .map(|hash| Contracts::::contract_address(&instance.account_id, hash, &[], &[])) .collect::>(); for addr in &addresses { @@ -2022,27 +2147,27 @@ benchmarks! { return Err("Expected that contract does not exist at this point.".into()); } } - }: call(origin, callee, 0u32.into(), Weight::MAX, None, vec![]) - verify { + #[extrinsic_call] + call(origin, callee, 0u32.into(), Weight::MAX, None, vec![]); for addr in &addresses { - ContractInfoOf::::get(&addr) - .ok_or("Contract should have been instantiated")?; + ContractInfoOf::::get(&addr).ok_or("Contract should have been instantiated")?; } + Ok(()) } - #[pov_mode = Measured] - seal_instantiate_per_transfer_input_salt_byte { - let t in 0 .. 1; - let i in 0 .. (code::max_pages::() - 1) * 64 * 1024; - let s in 0 .. 
(code::max_pages::() - 1) * 64 * 1024; + #[benchmark(pov_mode = Measured)] + fn seal_instantiate_per_transfer_input_salt_byte( + t: Linear<0, 1>, + i: Linear<0, { (code::max_pages::() - 1) * 64 * 1024 }>, + s: Linear<0, { (code::max_pages::() - 1) * 64 * 1024 }>, + ) -> Result<(), BenchmarkError> { let callee_code = WasmModule::::dummy(); - let hash = callee_code.hash; let hash_bytes = callee_code.hash.encode(); let hash_len = hash_bytes.len(); let caller = whitelisted_caller(); T::Currency::set_balance(&caller, caller_funding::()); Contracts::::store_code_raw(callee_code.code, caller)?; - let value: BalanceOf = t.into(); + let value: BalanceOf = t.into(); let value_bytes = value.encode(); let code = WasmModule::::from(ModuleDefinition { @@ -2066,27 +2191,21 @@ benchmarks! { return_type: Some(ValueType::I32), }], data_segments: vec![ - DataSegment { - offset: 0, - value: hash_bytes, - }, - DataSegment { - offset: hash_len as u32, - value: value_bytes, - }, + DataSegment { offset: 0, value: hash_bytes }, + DataSegment { offset: hash_len as u32, value: value_bytes }, ], call_body: Some(body::plain(vec![ - Instruction::I32Const(0 as i32), // code_hash_ptr - Instruction::I64Const(0), // gas + Instruction::I32Const(0 as i32), // code_hash_ptr + Instruction::I64Const(0), // gas Instruction::I32Const(hash_len as i32), // value_ptr - Instruction::I32Const(0 as i32), // input_data_ptr - Instruction::I32Const(i as i32), // input_data_len + Instruction::I32Const(0 as i32), // input_data_ptr + Instruction::I32Const(i as i32), // input_data_len Instruction::I32Const(SENTINEL as i32), // address_ptr - Instruction::I32Const(0), // address_len_ptr + Instruction::I32Const(0), // address_len_ptr Instruction::I32Const(SENTINEL as i32), // output_ptr - Instruction::I32Const(0), // output_len_ptr - Instruction::I32Const(0 as i32), // salt_ptr - Instruction::I32Const(s as i32), // salt_len + Instruction::I32Const(0), // output_len_ptr + Instruction::I32Const(0 as i32), // salt_ptr + Instruction::I32Const(s as i32), // salt_len Instruction::Call(0), Instruction::I32Eqz, Instruction::If(BlockType::NoResult), @@ -2096,105 +2215,123 @@ benchmarks! { Instruction::End, Instruction::End, ])), - .. Default::default() + ..Default::default() }); let instance = Contract::::new(code, vec![])?; instance.set_balance(value + (Pallet::::min_balance() * 2u32.into())); let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // Only the overhead of calling the function itself with minimal arguments. - #[pov_mode = Measured] - seal_hash_sha2_256 { - let r in 0 .. API_BENCHMARK_RUNS; - let instance = Contract::::new(WasmModule::hasher( - "seal_hash_sha2_256", r, 0, - ), vec![])?; + #[benchmark(pov_mode = Measured)] + fn seal_hash_sha2_256(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let instance = Contract::::new(WasmModule::hasher("seal_hash_sha2_256", r, 0), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // `n`: Input to hash in bytes - #[pov_mode = Measured] - seal_hash_sha2_256_per_byte { - let n in 0 .. 
code::max_pages::<T>() * 64 * 1024; - let instance = Contract::<T>::new(WasmModule::hasher( - "seal_hash_sha2_256", 1, n, - ), vec![])?; + #[benchmark(pov_mode = Measured)] + fn seal_hash_sha2_256_per_byte( + n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>, + ) -> Result<(), BenchmarkError> { + let instance = Contract::<T>::new(WasmModule::hasher("seal_hash_sha2_256", 1, n), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // Only the overhead of calling the function itself with minimal arguments. - #[pov_mode = Measured] - seal_hash_keccak_256 { - let r in 0 .. API_BENCHMARK_RUNS; - let instance = Contract::<T>::new(WasmModule::hasher( - "seal_hash_keccak_256", r, 0, - ), vec![])?; + #[benchmark(pov_mode = Measured)] + fn seal_hash_keccak_256(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let instance = + Contract::<T>::new(WasmModule::hasher("seal_hash_keccak_256", r, 0), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // `n`: Input to hash in bytes - #[pov_mode = Measured] - seal_hash_keccak_256_per_byte { - let n in 0 .. code::max_pages::<T>() * 64 * 1024; - let instance = Contract::<T>::new(WasmModule::hasher( - "seal_hash_keccak_256", 1, n, - ), vec![])?; + #[benchmark(pov_mode = Measured)] + fn seal_hash_keccak_256_per_byte( + n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>, + ) -> Result<(), BenchmarkError> { + let instance = + Contract::<T>::new(WasmModule::hasher("seal_hash_keccak_256", 1, n), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // Only the overhead of calling the function itself with minimal arguments. - #[pov_mode = Measured] - seal_hash_blake2_256 { - let r in 0 .. API_BENCHMARK_RUNS; - let instance = Contract::<T>::new(WasmModule::hasher( - "seal_hash_blake2_256", r, 0, - ), vec![])?; + #[benchmark(pov_mode = Measured)] + fn seal_hash_blake2_256(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let instance = + Contract::<T>::new(WasmModule::hasher("seal_hash_blake2_256", r, 0), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // `n`: Input to hash in bytes - #[pov_mode = Measured] - seal_hash_blake2_256_per_byte { - let n in 0 .. 
code::max_pages::<T>() * 64 * 1024; - let instance = Contract::<T>::new(WasmModule::hasher( - "seal_hash_blake2_256", 1, n, - ), vec![])?; + #[benchmark(pov_mode = Measured)] + fn seal_hash_blake2_256_per_byte( + n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>, + ) -> Result<(), BenchmarkError> { + let instance = + Contract::<T>::new(WasmModule::hasher("seal_hash_blake2_256", 1, n), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // Only the overhead of calling the function itself with minimal arguments. - #[pov_mode = Measured] - seal_hash_blake2_128 { - let r in 0 .. API_BENCHMARK_RUNS; - let instance = Contract::<T>::new(WasmModule::hasher( - "seal_hash_blake2_128", r, 0, - ), vec![])?; + #[benchmark(pov_mode = Measured)] + fn seal_hash_blake2_128(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + let instance = + Contract::<T>::new(WasmModule::hasher("seal_hash_blake2_128", r, 0), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // `n`: Input to hash in bytes - #[pov_mode = Measured] - seal_hash_blake2_128_per_byte { - let n in 0 .. code::max_pages::<T>() * 64 * 1024; - let instance = Contract::<T>::new(WasmModule::hasher( - "seal_hash_blake2_128", 1, n, - ), vec![])?; + #[benchmark(pov_mode = Measured)] + fn seal_hash_blake2_128_per_byte( + n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>, + ) -> Result<(), BenchmarkError> { + let instance = + Contract::<T>::new(WasmModule::hasher("seal_hash_blake2_128", 1, n), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // `n`: Message input length to verify in bytes. - #[pov_mode = Measured] - seal_sr25519_verify_per_byte { - let n in 0 .. T::MaxCodeLen::get() - 255; // need some buffer so the code size does not - // exceed the max code size. - + // need some buffer so the code size does not exceed the max code size. + #[benchmark(pov_mode = Measured)] + fn seal_sr25519_verify_per_byte( + n: Linear<0, { T::MaxCodeLen::get() - 255 }>, + ) -> Result<(), BenchmarkError> { let message = (0..n).zip((32u8..127u8).cycle()).map(|(_, c)| c).collect::<Vec<u8>>(); let message_len = message.len() as i32; let key_type = sp_core::crypto::KeyTypeId(*b"code"); let pub_key = sp_io::crypto::sr25519_generate(key_type, None); - let sig = sp_io::crypto::sr25519_sign(key_type, &pub_key, &message).expect("Generates signature"); + let sig = + sp_io::crypto::sr25519_sign(key_type, &pub_key, &message).expect("Generates signature"); let sig = AsRef::<[u8; 64]>::as_ref(&sig).to_vec(); let code = WasmModule::<T>::from(ModuleDefinition { @@ -2206,50 +2343,48 @@ benchmarks! 
{ return_type: Some(ValueType::I32), }], data_segments: vec![ - DataSegment { - offset: 0, - value: sig, - }, - DataSegment { - offset: 64, - value: pub_key.to_vec(), - }, - DataSegment { - offset: 96, - value: message, - }, + DataSegment { offset: 0, value: sig }, + DataSegment { offset: 64, value: pub_key.to_vec() }, + DataSegment { offset: 96, value: message }, ], call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // signature_ptr - Instruction::I32Const(64), // pub_key_ptr + Instruction::I32Const(0), // signature_ptr + Instruction::I32Const(64), // pub_key_ptr Instruction::I32Const(message_len), // message_len - Instruction::I32Const(96), // message_ptr + Instruction::I32Const(96), // message_ptr Instruction::Call(0), Instruction::Drop, Instruction::End, ])), - .. Default::default() + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // Only calling the function itself with valid arguments. // It generates different private keys and signatures for the message "Hello world". // This is a slow call: We reduce the number of runs. - #[pov_mode = Measured] - seal_sr25519_verify { - let r in 0 .. API_BENCHMARK_RUNS / 10; - + #[benchmark(pov_mode = Measured)] + fn seal_sr25519_verify( + r: Linear<0, { API_BENCHMARK_RUNS / 10 }>, + ) -> Result<(), BenchmarkError> { let message = b"Hello world".to_vec(); let message_len = message.len() as i32; let key_type = sp_core::crypto::KeyTypeId(*b"code"); let sig_params = (0..r) - .flat_map(|i| { + .flat_map(|_| { let pub_key = sp_io::crypto::sr25519_generate(key_type, None); - let sig = sp_io::crypto::sr25519_sign(key_type, &pub_key, &message).expect("Generates signature"); - let data: [u8; 96] = [AsRef::<[u8]>::as_ref(&sig), AsRef::<[u8]>::as_ref(&pub_key)].concat().try_into().unwrap(); + let sig = sp_io::crypto::sr25519_sign(key_type, &pub_key, &message) + .expect("Generates signature"); + let data: [u8; 96] = [AsRef::<[u8]>::as_ref(&sig), AsRef::<[u8]>::as_ref(&pub_key)] + .concat() + .try_into() + .unwrap(); data }) .collect::>(); @@ -2264,42 +2399,41 @@ benchmarks! { return_type: Some(ValueType::I32), }], data_segments: vec![ - DataSegment { - offset: 0, - value: sig_params - }, - DataSegment { - offset: sig_params_len as u32, - value: message, - }, + DataSegment { offset: 0, value: sig_params }, + DataSegment { offset: sig_params_len as u32, value: message }, ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(0, 96), // signature_ptr - Counter(64, 96), // pub_key_ptr - Regular(Instruction::I32Const(message_len)), // message_len - Regular(Instruction::I32Const(sig_params_len)), // message_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. 
Default::default() + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(0, 96), // signature_ptr + Counter(64, 96), // pub_key_ptr + Regular(Instruction::I32Const(message_len)), // message_len + Regular(Instruction::I32Const(sig_params_len)), // message_ptr + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // Only calling the function itself with valid arguments. // It generates different private keys and signatures for the message "Hello world". // This is a slow call: We reduce the number of runs. - #[pov_mode = Measured] - seal_ecdsa_recover { - let r in 0 .. API_BENCHMARK_RUNS / 10; - + #[benchmark(pov_mode = Measured)] + fn seal_ecdsa_recover(r: Linear<0, { API_BENCHMARK_RUNS / 10 }>) -> Result<(), BenchmarkError> { let message_hash = sp_io::hashing::blake2_256("Hello world".as_bytes()); let key_type = sp_core::crypto::KeyTypeId(*b"code"); let signatures = (0..r) - .map(|i| { + .map(|_| { let pub_key = sp_io::crypto::ecdsa_generate(key_type, None); - let sig = sp_io::crypto::ecdsa_sign_prehashed(key_type, &pub_key, &message_hash).expect("Generates signature"); + let sig = sp_io::crypto::ecdsa_sign_prehashed(key_type, &pub_key, &message_hash) + .expect("Generates signature"); AsRef::<[u8; 65]>::as_ref(&sig).to_vec() }) .collect::>(); @@ -2315,40 +2449,39 @@ benchmarks! { return_type: Some(ValueType::I32), }], data_segments: vec![ - DataSegment { - offset: 0, - value: message_hash[..].to_vec(), - }, - DataSegment { - offset: 32, - value: signatures, - }, + DataSegment { offset: 0, value: message_hash[..].to_vec() }, + DataSegment { offset: 32, value: signatures }, ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(32, 65), // signature_ptr - Regular(Instruction::I32Const(0)), // message_hash_ptr - Regular(Instruction::I32Const(signatures_bytes_len + 32)), // output_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. Default::default() + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(32, 65), // signature_ptr + Regular(Instruction::I32Const(0)), // message_hash_ptr + Regular(Instruction::I32Const(signatures_bytes_len + 32)), // output_len_ptr + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // Only calling the function itself for the list of // generated different ECDSA keys. - // This is a slow call: We redeuce the number of runs. - #[pov_mode = Measured] - seal_ecdsa_to_eth_address { - let r in 0 .. API_BENCHMARK_RUNS / 10; + // This is a slow call: We reduce the number of runs. 
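Every hunk in this file applies the same mechanical translation from the old `benchmarks!` DSL to the attribute-driven benchmarking v2 syntax. Reduced to a skeleton (a minimal sketch against `frame_benchmarking::v2`; the pallet, its `do_something` extrinsic, and the `crate::mock` paths are placeholders, not items from this diff):

use frame_benchmarking::v2::*;
use frame_system::RawOrigin;

#[benchmarks]
mod benches {
	use super::*;

	// `Linear<0, 100>` replaces the old `let r in 0 .. 100;` range syntax.
	#[benchmark(pov_mode = Measured)]
	fn do_something(r: Linear<0, 100>) -> Result<(), BenchmarkError> {
		// Everything before the `#[extrinsic_call]` line is unmeasured
		// setup, like the block body before `}:` in the old DSL.
		let caller: T::AccountId = whitelisted_caller();

		// The single measured statement; it replaces the `}: call(...)`
		// tail. `#[block]` is the equivalent for non-extrinsic code.
		#[extrinsic_call]
		do_something(RawOrigin::Signed(caller), r);

		Ok(())
	}

	impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
}

Returning `Result<(), BenchmarkError>` is what lets the migrated benchmarks keep the `?` operators on their `Contract::new` setup calls.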
+ #[benchmark(pov_mode = Measured)] + fn seal_ecdsa_to_eth_address( + r: Linear<0, { API_BENCHMARK_RUNS / 10 }>, + ) -> Result<(), BenchmarkError> { let key_type = sp_core::crypto::KeyTypeId(*b"code"); let pub_keys_bytes = (0..r) - .flat_map(|_| { - sp_io::crypto::ecdsa_generate(key_type, None).0 - }) - .collect::>(); + .flat_map(|_| sp_io::crypto::ecdsa_generate(key_type, None).0) + .collect::>(); let pub_keys_bytes_len = pub_keys_bytes.len() as i32; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), @@ -2358,27 +2491,27 @@ benchmarks! { params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], - data_segments: vec![ - DataSegment { - offset: 0, - value: pub_keys_bytes, - }, - ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(0, 33), // pub_key_ptr - Regular(Instruction::I32Const(pub_keys_bytes_len)), // out_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. Default::default() + data_segments: vec![DataSegment { offset: 0, value: pub_keys_bytes }], + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(0, 33), // pub_key_ptr + Regular(Instruction::I32Const(pub_keys_bytes_len)), // out_ptr + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[pov_mode = Measured] - seal_set_code_hash { - let r in 0 .. API_BENCHMARK_RUNS; + #[benchmark(pov_mode = Measured)] + fn seal_set_code_hash(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { let code_hashes = (0..r) .map(|i| { let new_code = WasmModule::::dummy_with_bytes(i); @@ -2390,38 +2523,37 @@ benchmarks! { .collect::, &'static str>>()?; let code_hash_len = code_hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); let code_hashes_bytes = code_hashes.iter().flat_map(|x| x.encode()).collect::>(); - let code_hashes_len = code_hashes_bytes.len(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "seal0", name: "seal_set_code_hash", - params: vec![ - ValueType::I32, - ], + params: vec![ValueType::I32], return_type: Some(ValueType::I32), }], - data_segments: vec![ - DataSegment { - offset: 0, - value: code_hashes_bytes, - }, - ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(0, code_hash_len as u32), // code_hash_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. Default::default() + data_segments: vec![DataSegment { offset: 0, value: code_hashes_bytes }], + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(0, code_hash_len as u32), // code_hash_ptr + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[pov_mode = Measured] - lock_delegate_dependency { - let r in 0 .. 
T::MaxDelegateDependencies::get(); + #[benchmark(pov_mode = Measured)] + fn lock_delegate_dependency( + r: Linear<0, { T::MaxDelegateDependencies::get() }>, + ) -> Result<(), BenchmarkError> { let code_hashes = (0..r) .map(|i| { let new_code = WasmModule::::dummy_with_bytes(65 + i); @@ -2442,24 +2574,27 @@ benchmarks! { params: vec![ValueType::I32], return_type: None, }], - data_segments: vec![ - DataSegment { - offset: 0, - value: code_hashes_bytes, - }, - ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(0, code_hash_len as u32), // code_hash_ptr - Regular(Instruction::Call(0)), - ])), - .. Default::default() + data_segments: vec![DataSegment { offset: 0, value: code_hashes_bytes }], + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(0, code_hash_len as u32), // code_hash_ptr + Regular(Instruction::Call(0)), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - unlock_delegate_dependency { - let r in 0 .. T::MaxDelegateDependencies::get(); + #[benchmark] + fn unlock_delegate_dependency( + r: Linear<0, { T::MaxDelegateDependencies::get() }>, + ) -> Result<(), BenchmarkError> { let code_hashes = (0..r) .map(|i| { let new_code = WasmModule::::dummy_with_bytes(65 + i); @@ -2475,40 +2610,46 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "unlock_delegate_dependency", - params: vec![ValueType::I32], - return_type: None, - }, ImportedFunction { - module: "seal0", - name: "lock_delegate_dependency", - params: vec![ValueType::I32], - return_type: None - }], - data_segments: vec![ - DataSegment { - offset: 0, - value: code_hashes_bytes, + imported_functions: vec![ + ImportedFunction { + module: "seal0", + name: "unlock_delegate_dependency", + params: vec![ValueType::I32], + return_type: None, + }, + ImportedFunction { + module: "seal0", + name: "lock_delegate_dependency", + params: vec![ValueType::I32], + return_type: None, }, ], - deploy_body: Some(body::repeated_dyn(r, vec![ - Counter(0, code_hash_len as u32), // code_hash_ptr - Regular(Instruction::Call(1)), - ])), - call_body: Some(body::repeated_dyn(r, vec![ - Counter(0, code_hash_len as u32), // code_hash_ptr - Regular(Instruction::Call(0)), - ])), - .. Default::default() + data_segments: vec![DataSegment { offset: 0, value: code_hashes_bytes }], + deploy_body: Some(body::repeated_dyn( + r, + vec![ + Counter(0, code_hash_len as u32), // code_hash_ptr + Regular(Instruction::Call(1)), + ], + )), + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(0, code_hash_len as u32), // code_hash_ptr + Regular(Instruction::Call(0)), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[pov_mode = Measured] - seal_reentrance_count { - let r in 0 .. 
API_BENCHMARK_RUNS; + #[benchmark(pov_mode = Measured)] + fn seal_reentrance_count(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -2517,19 +2658,20 @@ benchmarks! { params: vec![], return_type: Some(ValueType::I32), }], - call_body: Some(body::repeated(r, &[ - Instruction::Call(0), - Instruction::Drop, - ])), - .. Default::default() + call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[pov_mode = Measured] - seal_account_reentrance_count { - let r in 0 .. API_BENCHMARK_RUNS; + #[benchmark(pov_mode = Measured)] + fn seal_account_reentrance_count( + r: Linear<0, API_BENCHMARK_RUNS>, + ) -> Result<(), BenchmarkError> { let dummy_code = WasmModule::::dummy_with_bytes(0); let accounts = (0..r) .map(|i| Contract::with_index(i + 1, dummy_code.clone(), vec![])) @@ -2544,26 +2686,26 @@ benchmarks! { params: vec![ValueType::I32], return_type: Some(ValueType::I32), }], - data_segments: vec![ - DataSegment { - offset: 0, - value: account_id_bytes, - }, - ], - call_body: Some(body::repeated_dyn(r, vec![ - Counter(0, account_id_len as u32), // account_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ])), - .. Default::default() + data_segments: vec![DataSegment { offset: 0, value: account_id_bytes }], + call_body: Some(body::repeated_dyn( + r, + vec![ + Counter(0, account_id_len as u32), // account_ptr + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ], + )), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } - #[pov_mode = Measured] - seal_instantiation_nonce { - let r in 0 .. API_BENCHMARK_RUNS; + #[benchmark(pov_mode = Measured)] + fn seal_instantiation_nonce(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -2572,29 +2714,27 @@ benchmarks! { params: vec![], return_type: Some(ValueType::I64), }], - call_body: Some(body::repeated(r, &[ - Instruction::Call(0), - Instruction::Drop, - ])), - .. Default::default() + call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])), + ..Default::default() }); let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) + #[extrinsic_call] + call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + Ok(()) + } // We load `i64` values from random linear memory locations and store the loaded // values back into yet another random linear memory location. - // The random addresses are uniformely distributed across the entire span of the linear memory. + // The random addresses are uniformly distributed across the entire span of the linear memory. 
// We do this to enforce random memory accesses which are particularly expensive. // // The combination of this computation is our weight base `w_base`. - #[pov_mode = Ignored] - instr_i64_load_store { - let r in 0 .. INSTR_BENCHMARK_RUNS; - + #[benchmark(pov_mode = Ignored)] + fn instr_i64_load_store(r: Linear<0, INSTR_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { use rand::prelude::*; - // We do not need to be secure here. Fixed seed allows for determinstic results. + // We do not need to be secure here. Fixed seed allows for deterministic results. let mut rng = rand_pcg::Pcg32::seed_from_u64(8446744073709551615); let memory = ImportedMemory::max::<T>(); @@ -2614,24 +2754,27 @@ benchmarks! { [ Instruction::I32Const(c0), // address for `i64.load_8s` Instruction::I64Load8S(0, 0), - Instruction::SetLocal(0), // temporarily store value loaded in `i64.load_8s` + Instruction::SetLocal(0), /* temporarily store value loaded in + * `i64.load_8s` */ Instruction::I32Const(c1), // address for `i64.store8` Instruction::GetLocal(0), // value to be stored in `i64.store8` Instruction::I64Store8(0, 0), ] - } + }, )), - .. Default::default() + ..Default::default() })); - }: { - sbox.invoke(); + #[block] + { + sbox.invoke(); + } + Ok(()) } // This is no benchmark. It merely exists to have an easy way to pretty print the currently // configured `Schedule` during benchmark development. Check the README on how to print this. - #[extra] - #[pov_mode = Ignored] - print_schedule { + #[benchmark(extra, pov_mode = Ignored)] + fn print_schedule() -> Result<(), BenchmarkError> { let max_weight = <T as frame_system::Config>::BlockWeights::get().max_block; let (weight_per_key, key_budget) = ContractInfo::<T>::deletion_budget(max_weight); let schedule = T::Schedule::get(); @@ -2641,11 +2784,15 @@ benchmarks! { Lazy deletion weight per key: {weight_per_key} Lazy deletion keys per block: {key_budget} "); - }: {} + #[block] + {} + + Err(BenchmarkError::Skip) + } impl_benchmark_test_suite!( Contracts, crate::tests::ExtBuilder::default().build(), crate::tests::Test, - ) + ); } diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index e706c226a611..a868d110c2d5 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -532,9 +532,9 @@ enum FrameArgs<'a, T: Config, E> { nonce: u64, /// The executable whose `deploy` function is run. executable: E, - /// A salt used in the contract address deriviation of the new contract. + /// A salt used in the contract address derivation of the new contract. salt: &'a [u8], - /// The input data is used in the contract address deriviation of the new contract. + /// The input data is used in the contract address derivation of the new contract.
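The two doc comments above are the reason the benchmark suite can instantiate the same code repeatedly: both `salt` and `input_data` feed into the derived account. A simplified sketch of that dependency (illustrative only; the pallet's actual, versioned preimage lives in its `contract_address` logic):

use codec::Encode; // parity-scale-codec
use sp_io::hashing::blake2_256;

fn illustrative_address(
	deployer: &[u8; 32],
	code_hash: &[u8; 32],
	input_data: &[u8],
	salt: &[u8],
) -> [u8; 32] {
	// A different salt or different constructor input yields a different
	// account id, so one deployer can instantiate the same code many times.
	blake2_256(&(deployer, code_hash, input_data, salt).encode())
}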
input_data: &'a [u8], }, } @@ -1467,14 +1467,14 @@ where fn sr25519_verify(&self, signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> bool { sp_io::crypto::sr25519_verify( - &SR25519Signature(*signature), + &SR25519Signature::from(*signature), message, - &SR25519Public(*pub_key), + &SR25519Public::from(*pub_key), ) } fn ecdsa_to_eth_address(&self, pk: &[u8; 33]) -> Result<[u8; 20], ()> { - ECDSAPublic(*pk).to_eth_address() + ECDSAPublic::from(*pk).to_eth_address() } #[cfg(test)] diff --git a/substrate/frame/contracts/src/gas.rs b/substrate/frame/contracts/src/gas.rs index b9d91f38f16f..32fad2140f14 100644 --- a/substrate/frame/contracts/src/gas.rs +++ b/substrate/frame/contracts/src/gas.rs @@ -352,7 +352,7 @@ mod tests { assert!(gas_meter.charge(SimpleToken(1)).is_err()); } - // Make sure that the gas meter does not charge in case of overcharger + // Make sure that the gas meter does not charge in case of overcharge #[test] fn overcharge_does_not_charge() { let mut gas_meter = GasMeter::::new(Weight::from_parts(200, 0)); diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index bab830ac1f21..05c4e0ff74fc 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -301,6 +301,9 @@ pub mod pallet { /// Therefore please make sure to be restrictive about which dispatchables are allowed /// in order to not introduce a new DoS vector like memory allocation patterns that can /// be exploited to drive the runtime into a panic. + /// + /// This filter does not apply to XCM transact calls. To impose restrictions on XCM transact + /// calls, you must configure them separately within the XCM pallet itself. type CallFilter: Contains<::RuntimeCall>; /// Used to answer contracts' queries regarding the current weight price. This is **not** @@ -1107,7 +1110,7 @@ pub mod pallet { /// A more detailed error can be found on the node console if debug messages are enabled /// by supplying `-lruntime::contracts=debug`. CodeRejected, - /// An indetermistic code was used in a context where this is not permitted. + /// An indeterministic code was used in a context where this is not permitted. Indeterministic, /// A pending migration needs to complete before the extrinsic can be called. MigrationInProgress, diff --git a/substrate/frame/contracts/src/migration/v09.rs b/substrate/frame/contracts/src/migration/v09.rs index 3c49bac16b1d..5b7dd21e0063 100644 --- a/substrate/frame/contracts/src/migration/v09.rs +++ b/substrate/frame/contracts/src/migration/v09.rs @@ -131,7 +131,7 @@ impl MigrationStep for Migration { let module = CodeStorage::::get(&code_hash).unwrap(); ensure!( module.instruction_weights_version == old.instruction_weights_version, - "invalid isntruction weights version" + "invalid instruction weights version" ); ensure!(module.determinism == Determinism::Enforced, "invalid determinism"); ensure!(module.initial == old.initial, "invalid initial"); diff --git a/substrate/frame/contracts/src/schedule.rs b/substrate/frame/contracts/src/schedule.rs index b2e3801deaec..06a7c2005aa5 100644 --- a/substrate/frame/contracts/src/schedule.rs +++ b/substrate/frame/contracts/src/schedule.rs @@ -193,7 +193,7 @@ pub struct HostFnWeights { /// Weight of calling `seal_set_storage`. pub set_storage: Weight, - /// Weight per written byten of an item stored with `seal_set_storage`. + /// Weight per written byte of an item stored with `seal_set_storage`. 
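A `_per_byte` entry like the one documented above is the slope of a linear cost model: charging combines the fixed per-call weight with a component proportional to the written length. A minimal sketch of that combination (names are illustrative, not the pallet's charging code):

use sp_weights::Weight;

fn set_storage_cost(base: Weight, per_new_byte: Weight, value_len: u64) -> Weight {
	// `saturating_mul` scales both weight axes (ref_time and proof_size).
	base.saturating_add(per_new_byte.saturating_mul(value_len))
}

The `*_per_byte` benchmarks earlier in this diff exist precisely to fit that slope.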
pub set_storage_per_new_byte: Weight, /// Weight per overwritten byte of an item stored with `seal_set_storage`. diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index 97d3e9bc0f00..7654f148fc92 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -15,6 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +mod builder; mod pallet_dummy; mod test_debug; @@ -610,7 +611,7 @@ fn calling_plain_account_fails() { let base_cost = <::WeightInfo as WeightInfo>::call(); assert_eq!( - Contracts::call(RuntimeOrigin::signed(ALICE), BOB, 0, GAS_LIMIT, None, Vec::new()), + builder::call(BOB).build(), Err(DispatchErrorWithPostInfo { error: Error::::ContractNotFound.into(), post_info: PostDispatchInfo { @@ -668,32 +669,13 @@ fn migration_in_progress_works() { Contracts::set_code(RuntimeOrigin::signed(ALICE), BOB.clone(), code_hash), Error::::MigrationInProgress, ); + assert_err_ignore_postinfo!(builder::call(BOB).build(), Error::::MigrationInProgress); assert_err_ignore_postinfo!( - Contracts::call(RuntimeOrigin::signed(ALICE), BOB, 0, GAS_LIMIT, None, vec![],), + builder::instantiate_with_code(wasm).value(100_000).build(), Error::::MigrationInProgress, ); assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 100_000, - GAS_LIMIT, - None, - wasm, - vec![], - vec![], - ), - Error::::MigrationInProgress, - ); - assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::signed(ALICE), - 100_000, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).value(100_000).build(), Error::::MigrationInProgress, ); }); @@ -721,20 +703,9 @@ fn instantiate_and_call_and_deposit_event() { initialize_block(2); // Check at the end to get hash on error easily - let addr = Contracts::bare_instantiate( - ALICE, - value, - GAS_LIMIT, - None, - Code::Existing(code_hash), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Existing(code_hash)) + .value(value) + .build_and_unwrap_account_id(); assert!(ContractInfoOf::::contains_key(&addr)); assert_eq!( @@ -812,41 +783,21 @@ fn deposit_event_max_value_limit() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 30_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_account_id(); // Call contract with allowed storage value. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2), // we are copying a huge buffer, - None, - ::Schedule::get().limits.payload_len.encode(), - )); + assert_ok!(builder::call(addr.clone()) + .gas_limit(GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2)) // we are copying a huge buffer, + .data(::Schedule::get().limits.payload_len.encode()) + .build()); // Call contract with too large a storage value. 
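The `builder::call(...)` chains replacing the long positional argument lists in these test hunks come from the new `tests/builder.rs` module added by this PR. Its shape boils down to the following condensed sketch, which assumes the test mock's `Contracts`, `ALICE`, and `GAS_LIMIT`; the real module covers the instantiate and `bare_*` entry points the same way:

struct CallBuilder {
	origin: RuntimeOrigin,
	dest: AccountId32,
	value: u64,
	gas_limit: Weight,
	storage_deposit_limit: Option<u64>,
	data: Vec<u8>,
}

fn call(dest: AccountId32) -> CallBuilder {
	// Every field gets the default the old call sites spelled out by hand.
	CallBuilder {
		origin: RuntimeOrigin::signed(ALICE),
		dest,
		value: 0,
		gas_limit: GAS_LIMIT,
		storage_deposit_limit: None,
		data: Vec::new(),
	}
}

impl CallBuilder {
	fn value(mut self, value: u64) -> Self { self.value = value; self }
	fn data(mut self, data: Vec<u8>) -> Self { self.data = data; self }
	fn build(self) -> DispatchResultWithPostInfo {
		Contracts::call(
			self.origin, self.dest, self.value,
			self.gas_limit, self.storage_deposit_limit, self.data,
		)
	}
}

Only the arguments a test actually cares about remain visible at the call site; everything else stays at its default.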
assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr, - 0, - GAS_LIMIT, - None, - (::Schedule::get().limits.payload_len + 1).encode(), - ), + builder::call(addr) + .data((::Schedule::get().limits.payload_len + 1).encode()) + .build(), Error::::ValueTooLarge, ); }); @@ -860,32 +811,16 @@ fn run_out_of_fuel_engine() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 100 * min_balance, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(100 * min_balance) + .build_and_unwrap_account_id(); // Call the contract with a fixed gas limit. It must run out of gas because it just // loops forever. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr, // newly created account - 0, - Weight::from_parts(1_000_000_000_000, u64::MAX), - None, - vec![], - ), + builder::call(addr) + .gas_limit(Weight::from_parts(1_000_000_000_000, u64::MAX)) + .build(), Error::::OutOfGas, ); }); @@ -899,36 +834,18 @@ fn run_out_of_fuel_host() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); let gas_limit = Weight::from_parts(u32::MAX as u64, GAS_LIMIT.proof_size()); // Use chain extension to charge more ref_time than it is available. 
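`Weight` here is two-dimensional, and the `gas_limit` built just above this comment caps only one axis: `ref_time` (computation) is squeezed down while `proof_size` (PoV bytes) keeps the regular limit, which is what lets the chain extension overrun ref_time specifically. The relevant constructor and accessor, as a sketch:

use sp_weights::Weight;

fn ref_time_capped(limit: Weight) -> Weight {
	// Keep the proof_size component of `limit`, replace the computation
	// budget with a small fixed cap.
	Weight::from_parts(u32::MAX as u64, limit.proof_size())
}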
- let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - gas_limit, - None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &u32::MAX.encode() }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result; + let result = builder::bare_call(addr.clone()) + .gas_limit(gas_limit) + .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &u32::MAX.encode() }.into()) + .build() + .result; assert_err!(result, >::OutOfGas); }); } @@ -938,63 +855,20 @@ fn gas_syncs_work() { let (code, _code_hash) = compile_module::("caller_is_origin_n").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_account_id(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - 0u32.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()).data(0u32.encode()).build(); assert_ok!(result.result); let engine_consumed_noop = result.gas_consumed.ref_time(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - 1u32.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()).data(1u32.encode()).build(); assert_ok!(result.result); let gas_consumed_once = result.gas_consumed.ref_time(); let host_consumed_once = ::Schedule::get().host_fn_weights.caller_is_origin.ref_time(); let engine_consumed_once = gas_consumed_once - host_consumed_once - engine_consumed_noop; - let result = Contracts::bare_call( - ALICE, - addr, - 0, - GAS_LIMIT, - None, - 2u32.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr).data(2u32.encode()).build(); assert_ok!(result.result); let gas_consumed_twice = result.gas_consumed.ref_time(); let host_consumed_twice = host_consumed_once * 2; @@ -1018,56 +892,21 @@ fn instantiate_unique_trie_id() { .unwrap(); // Instantiate the contract and store its trie id for later comparison. - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Existing(code_hash), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = + builder::bare_instantiate(Code::Existing(code_hash)).build_and_unwrap_account_id(); let trie_id = get_contract(&addr).trie_id; // Try to instantiate it again without termination should yield an error. assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).build(), >::DuplicateContract, ); // Terminate the contract. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); // Re-Instantiate after termination. 
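The `assert_ne!` on trie ids that follows the re-instantiation depends on the id being derived from more than the account. A heavily simplified sketch of nonce-based derivation (the real derivation lives in the pallet's `ContractInfo` construction; this preimage is only illustrative):

use codec::Encode;
use sp_io::hashing::blake2_256;

fn illustrative_trie_id(account: &[u8], nonce: u64) -> Vec<u8> {
	// A pallet-level counter that only ever increases keeps the child trie
	// fresh even when the same account id is reused after termination, so
	// the new contract can never observe its predecessor's storage.
	blake2_256(&(account, nonce).encode()).to_vec()
}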
- assert_ok!(Contracts::instantiate( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - )); + assert_ok!(builder::instantiate(code_hash).build()); // Trie ids shouldn't match or we might have a collision assert_ne!(trie_id, get_contract(&addr).trie_id); @@ -1081,42 +920,22 @@ fn storage_max_value_limit() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 30_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_account_id(); get_contract(&addr); // Call contract with allowed storage value. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2), // we are copying a huge buffer - None, - ::Schedule::get().limits.payload_len.encode(), - )); + assert_ok!(builder::call(addr.clone()) + .gas_limit(GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2)) // we are copying a huge buffer + .data(::Schedule::get().limits.payload_len.encode()) + .build()); // Call contract with too large a storage value. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr, - 0, - GAS_LIMIT, - None, - (::Schedule::get().limits.payload_len + 1).encode(), - ), + builder::call(addr) + .data((::Schedule::get().limits.payload_len + 1).encode()) + .build(), Error::::ValueTooLarge, ); }); @@ -1132,20 +951,9 @@ fn deploy_and_call_other_contract() { // Create let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let caller_addr = Contracts::bare_instantiate( - ALICE, - 100_000, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let caller_addr = builder::bare_instantiate(Code::Upload(caller_wasm)) + .value(100_000) + .build_and_unwrap_account_id(); Contracts::bare_upload_code(ALICE, callee_wasm, None, Determinism::Enforced).unwrap(); let callee_addr = Contracts::contract_address( @@ -1160,14 +968,9 @@ fn deploy_and_call_other_contract() { // Call BOB contract, which attempts to instantiate and call the callee contract and // makes various assertions on the results from those calls. 
- assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - caller_addr.clone(), - 0, - GAS_LIMIT, - None, - callee_code_hash.as_ref().to_vec(), - )); + assert_ok!(builder::call(caller_addr.clone()) + .data(callee_code_hash.as_ref().to_vec()) + .build()); assert_eq!( System::events(), @@ -1266,20 +1069,9 @@ fn delegate_call() { let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the 'caller' - let caller_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let caller_addr = builder::bare_instantiate(Code::Upload(caller_wasm)) + .value(300_000) + .build_and_unwrap_account_id(); // Only upload 'callee' code assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), @@ -1288,14 +1080,10 @@ fn delegate_call() { Determinism::Enforced, )); - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - caller_addr.clone(), - 1337, - GAS_LIMIT, - None, - callee_code_hash.as_ref().to_vec(), - )); + assert_ok!(builder::call(caller_addr.clone()) + .value(1337) + .data(callee_code_hash.as_ref().to_vec()) + .build()); }); } @@ -1306,20 +1094,9 @@ fn transfer_expendable_cannot_kill_account() { let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - 1_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(1_000) + .build_and_unwrap_account_id(); // Check that the BOB contract has been instantiated. get_contract(&addr); @@ -1347,7 +1124,7 @@ fn transfer_expendable_cannot_kill_account() { } #[test] -fn cannot_self_destruct_through_draning() { +fn cannot_self_destruct_through_draining() { let (wasm, _code_hash) = compile_module::<Test>("drain").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000); @@ -1355,34 +1132,16 @@ fn cannot_self_destruct_through_draning() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - value, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(value) + .build_and_unwrap_account_id(); // Check that the BOB contract has been instantiated. get_contract(&addr); // Call BOB which makes it send all funds to the zero address // The contract code asserts that the transfer fails with the correct error code - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); // Make sure the account wasn't removed by sending all free balance away. assert_eq!( @@ -1400,20 +1159,7 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract.
- let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let info_deposit = test_utils::contract_info_storage_deposit(&addr); @@ -1423,27 +1169,13 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { assert_eq!(<Test as Config>::Currency::total_balance(&addr), info_deposit + min_balance); // Create 100 bytes of storage with a price of 1 per byte and a single storage item of price 2 - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - 100u32.to_le_bytes().to_vec() - )); + assert_ok!(builder::call(addr.clone()).data(100u32.to_le_bytes().to_vec()).build()); assert_eq!(get_contract(&addr).total_deposit(), info_deposit + 102); // Increase the byte price and trigger a refund. This should not have any influence because // the removal is pro rata and exactly those 100 bytes should have been removed. DEPOSIT_PER_BYTE.with(|c| *c.borrow_mut() = 500); - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - 0u32.to_le_bytes().to_vec() - )); + assert_ok!(builder::call(addr.clone()).data(0u32.to_le_bytes().to_vec()).build()); // Make sure the account wasn't removed by the refund assert_eq!( @@ -1461,20 +1193,9 @@ fn cannot_self_destruct_while_live() { let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - 100_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_account_id(); // Check that the BOB contract has been instantiated. get_contract(&addr); @@ -1482,14 +1203,7 @@ fn cannot_self_destruct_while_live() { // Call BOB with input data, forcing it to make a recursive call to itself to // self-destruct, resulting in a trap. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![0], - ), + builder::call(addr.clone()).data(vec![0]).build(), Error::<Test>::ContractTrapped, ); @@ -1507,20 +1221,9 @@ fn self_destruct_works() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - 100_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_account_id(); // Check that the BOB contract has been instantiated. let _ = get_contract(&addr); @@ -1531,10 +1234,7 @@ fn self_destruct_works() { initialize_block(2); // Call BOB without input data which triggers termination. - assert_matches!( - Contracts::call(RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), - Ok(_) - ); + assert_matches!(builder::call(addr.clone()).build(), Ok(_)); // Check that code is still there but refcount dropped to zero. assert_refcount!(&code_hash, 0); @@ -1621,20 +1321,10 @@ fn destroy_contract_and_transfer_funds() { // This deploys the BOB contract, which in turn deploys the CHARLIE contract during // construction.
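For reference, the `info_deposit + 102` expectation a few hunks up is plain arithmetic over the prices the test's own comments state (1 per byte, 2 per storage item at the time of the call):

// 100 bytes of value data in a single newly created storage item.
fn storage_deposit(bytes: u64, items: u64, per_byte: u64, per_item: u64) -> u64 {
	bytes * per_byte + items * per_item
}

// storage_deposit(100, 1, 1, 2) == 102, charged on top of the contract's
// own `contract_info_storage_deposit`.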
- let addr_bob = Contracts::bare_instantiate( - ALICE, - 200_000, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - callee_code_hash.as_ref().to_vec(), - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_bob = builder::bare_instantiate(Code::Upload(caller_wasm)) + .value(200_000) + .data(callee_code_hash.as_ref().to_vec()) + .build_and_unwrap_account_id(); // Check that the CHARLIE contract has been instantiated. let addr_charlie = @@ -1642,14 +1332,7 @@ fn destroy_contract_and_transfer_funds() { get_contract(&addr_charlie); // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_bob, - 0, - GAS_LIMIT, - None, - addr_charlie.encode(), - )); + assert_ok!(builder::call(addr_bob).data(addr_charlie.encode()).build()); // Check that CHARLIE has moved on to the great beyond (ie. died). assert!(get_contract_checked(&addr_charlie).is_none()); @@ -1662,17 +1345,9 @@ fn cannot_self_destruct_in_constructor() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Fail to instantiate the BOB because the contructor calls seal_terminate. + // Fail to instantiate the BOB because the constructor calls seal_terminate. assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 100_000, - GAS_LIMIT, - None, - wasm, - vec![], - vec![], - ), + builder::instantiate_with_code(wasm).value(100_000).build(), Error::::TerminatedInConstructor, ); }); @@ -1686,20 +1361,9 @@ fn crypto_hashes() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the CRYPTO_HASHES contract. - let addr = Contracts::bare_instantiate( - ALICE, - 100_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_account_id(); // Perform the call. let input = b"_DEAD_BEEF"; use sp_io::hashing::*; @@ -1748,36 +1412,13 @@ fn transfer_return_code() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); // Contract has only the minimal balance so any transfer will fail. 
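`bare_call` (and the new `builder::bare_call` wrapper) is the dry-run entry point used throughout these return-code tests: it skips extrinsic dispatch and hands back the full result object. A usage sketch in terms of the test module's helpers (`builder` and the mock's `AccountId32` are assumed from the surrounding tests):

fn dry_run_gas(addr: AccountId32) -> u64 {
	let result = builder::bare_call(addr).build();
	// `result.result` is a Result<ExecReturnValue, DispatchError>; the
	// outer struct additionally carries consumed gas and storage deposit.
	assert_ok!(result.result);
	result.gas_consumed.ref_time() // gas actually burnt, not the limit
}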
::Currency::set_balance(&addr, min_balance); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr.clone()).build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); }); } @@ -1791,113 +1432,60 @@ fn call_return_code() { let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); - let addr_bob = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(caller_code), - vec![0], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_bob = builder::bare_instantiate(Code::Upload(caller_code)) + .value(min_balance * 100) + .data(vec![0]) + .build_and_unwrap_account_id(); ::Currency::set_balance(&addr_bob, min_balance); // Contract calls into Django which is no valid contract - let result = Contracts::bare_call( - ALICE, - addr_bob.clone(), - 0, - GAS_LIMIT, - None, - AsRef::<[u8]>::as_ref(&DJANGO).to_vec(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr_bob.clone()) + .data(AsRef::<[u8]>::as_ref(&DJANGO).to_vec()) + .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::NotCallable); - let addr_django = Contracts::bare_instantiate( - CHARLIE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(callee_code), - vec![0], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_django = builder::bare_instantiate(Code::Upload(callee_code)) + .origin(CHARLIE) + .value(min_balance * 100) + .data(vec![0]) + .build_and_unwrap_account_id(); ::Currency::set_balance(&addr_django, min_balance); // Contract has only the minimal balance so any transfer will fail. - let result = Contracts::bare_call( - ALICE, - addr_bob.clone(), - 0, - GAS_LIMIT, - None, - AsRef::<[u8]>::as_ref(&addr_django) - .iter() - .chain(&0u32.to_le_bytes()) - .cloned() - .collect(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr_bob.clone()) + .data( + AsRef::<[u8]>::as_ref(&addr_django) + .iter() + .chain(&0u32.to_le_bytes()) + .cloned() + .collect(), + ) + .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but callee reverts because "1" is passed. ::Currency::set_balance(&addr_bob, min_balance + 1000); - let result = Contracts::bare_call( - ALICE, - addr_bob.clone(), - 0, - GAS_LIMIT, - None, - AsRef::<[u8]>::as_ref(&addr_django) - .iter() - .chain(&1u32.to_le_bytes()) - .cloned() - .collect(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr_bob.clone()) + .data( + AsRef::<[u8]>::as_ref(&addr_django) + .iter() + .chain(&1u32.to_le_bytes()) + .cloned() + .collect(), + ) + .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::CalleeReverted); // Contract has enough balance but callee traps because "2" is passed. 
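What `assert_return_code!` checks in these tests: the caller fixture forwards the host function's status code as its own output, so a successful execution carries the code in its first four output bytes. A sketch with an illustrative subset of the numeric values (the full set lives in the pallet's `ReturnCode` definition):

#[derive(Debug, PartialEq)]
#[repr(u32)]
enum IllustrativeReturnCode {
	Success = 0,
	CalleeTrapped = 1,
	CalleeReverted = 2,
	TransferFailed = 5,
	NotCallable = 8,
}

fn decode_return_code(output: &[u8]) -> u32 {
	// Fixed-width integers are little endian in SCALE.
	u32::from_le_bytes(output[..4].try_into().expect("at least 4 bytes"))
}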
- let result = Contracts::bare_call( - ALICE, - addr_bob, - 0, - GAS_LIMIT, - None, - AsRef::<[u8]>::as_ref(&addr_django) - .iter() - .chain(&2u32.to_le_bytes()) - .cloned() - .collect(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr_bob) + .data( + AsRef::<[u8]>::as_ref(&addr_django) + .iter() + .chain(&2u32.to_le_bytes()) + .cloned() + .collect(), + ) + .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); }); } @@ -1912,95 +1500,34 @@ fn instantiate_return_code() { let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); let callee_hash = callee_hash.as_ref().to_vec(); - assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - min_balance * 100, - GAS_LIMIT, - None, - callee_code, - vec![], - vec![], - )); + assert_ok!(builder::instantiate_with_code(callee_code).value(min_balance * 100).build()); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(caller_code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(caller_code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); // Contract has only the minimal balance so any transfer will fail. ::Currency::set_balance(&addr, min_balance); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - callee_hash.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr.clone()) + .data(callee_hash.clone()) + .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but the passed code hash is invalid ::Currency::set_balance(&addr, min_balance + 10_000); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![0; 33], - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr.clone()).data(vec![0; 33]).build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::CodeNotFound); // Contract has enough balance but callee reverts because "1" is passed. - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr.clone()) + .data(callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect()) + .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::CalleeReverted); // Contract has enough balance but callee traps because "2" is passed. 
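The `callee_hash.iter().chain(&2u32.to_le_bytes())` input built right after this comment is manual byte concatenation: the code hash followed by a little-endian `u32` that the fixture forwards to the callee (1 makes it revert, 2 makes it trap). Since SCALE encodes fixed-width integers little endian, `encode()` would produce the same bytes; a sketch:

use codec::Encode; // parity-scale-codec

fn fixture_input(code_hash: &[u8], arg: u32) -> Vec<u8> {
	// For a u32, SCALE encoding and raw little-endian bytes coincide.
	debug_assert_eq!(arg.encode(), arg.to_le_bytes().to_vec());
	code_hash.iter().chain(&arg.to_le_bytes()).cloned().collect()
}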
- let result = Contracts::bare_call( - ALICE, - addr, - 0, - GAS_LIMIT, - None, - callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr) + .data(callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect()) + .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); }); } @@ -2013,15 +1540,7 @@ fn disabled_chain_extension_wont_deploy() { let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); TestExtension::disable(); assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 3 * min_balance, - GAS_LIMIT, - None, - code, - vec![], - vec![], - ), + builder::instantiate_with_code(code).value(3 * min_balance).build(), >::CodeRejected, ); }); @@ -2033,23 +1552,12 @@ fn disabled_chain_extension_errors_on_call() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); TestExtension::disable(); assert_err_ignore_postinfo!( - Contracts::call(RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), + builder::call(addr.clone()).build(), Error::::CodeRejected, ); }); @@ -2061,141 +1569,61 @@ fn chain_extension_works() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); // 0 = read input buffer and pass it through as output let input: Vec = ExtensionInput { extension_id: 0, func_id: 0, extra: &[99] }.into(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - input.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()).data(input.clone()).build(); assert_eq!(TestExtension::last_seen_buffer(), input); assert_eq!(result.result.unwrap().data, input); // 1 = treat inputs as integer primitives and store the supplied integers - Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + builder::bare_call(addr.clone()) + .data(ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into()) + .build_and_unwrap_result(); assert_eq!(TestExtension::last_seen_input_len(), 4); // 2 = charge some extra weight (amount supplied in the fifth byte) - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &0u32.encode() }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - 
); + let result = builder::bare_call(addr.clone()) + .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &0u32.encode() }.into()) + .build(); assert_ok!(result.result); let gas_consumed = result.gas_consumed; - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &42u32.encode() }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()) + .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &42u32.encode() }.into()) + .build(); assert_ok!(result.result); assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 42); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &95u32.encode() }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()) + .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &95u32.encode() }.into()) + .build(); assert_ok!(result.result); assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 95); // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr.clone()) + .data(ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into()) + .build_and_unwrap_result(); assert_eq!(result.flags, ReturnFlags::REVERT); assert_eq!(result.data, vec![42, 99]); // diverging to second chain extension that sets flags to 0x1 and returns a fixed buffer // We set the MSB part to 1 (instead of 0) which routes the request into the second // extension - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - ExtensionInput { extension_id: 1, func_id: 0, extra: &[] }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr.clone()) + .data(ExtensionInput { extension_id: 1, func_id: 0, extra: &[] }.into()) + .build_and_unwrap_result(); assert_eq!(result.flags, ReturnFlags::REVERT); assert_eq!(result.data, vec![0x4B, 0x1D]); // Diverging to third chain extension that is disabled // We set the MSB part to 2 (instead of 0) which routes the request into the third extension assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - ExtensionInput { extension_id: 2, func_id: 0, extra: &[] }.into(), - ), + builder::call(addr.clone()) + .data(ExtensionInput { extension_id: 2, func_id: 0, extra: &[] }.into()) + .build(), Error::::NoChainExtension, ); }); @@ -2207,20 +1635,9 @@ fn chain_extension_temp_storage_works() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); // Call func 0 and func 1 
back to back. let stop_recursion = 0u8; @@ -2231,20 +1648,7 @@ fn chain_extension_temp_storage_works() { .as_ref(), ); - assert_ok!( - Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - input.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - ); + assert_ok!(builder::bare_call(addr.clone()).data(input.clone()).build().result); }) } @@ -2255,20 +1659,9 @@ fn lazy_removal_works() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); let info = get_contract(&addr); let trie = &info.child_trie_info(); @@ -2277,14 +1670,7 @@ fn lazy_removal_works() { child::put(trie, &[99], &42); // Terminate the contract - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); // Contract info should be gone assert!(!>::contains_key(&addr)); @@ -2309,20 +1695,10 @@ fn lazy_batch_removal_works() { let mut tries: Vec = vec![]; for i in 0..3u8 { - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code.clone()), - vec![], - vec![i], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code.clone())) + .value(min_balance * 100) + .salt(vec![i]) + .build_and_unwrap_account_id(); let info = get_contract(&addr); let trie = &info.child_trie_info(); @@ -2332,14 +1708,7 @@ fn lazy_batch_removal_works() { // Terminate the contract. Contract info should be gone, but value should be still there // as the lazy removal did not run, yet. 
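The lazy-removal behaviour these tests pin down, reduced to plain Rust: terminating a contract is O(1) and only queues its child trie, while a later pass deletes keys strictly within a weight budget and resumes in a following block when the budget runs out. All types below are illustrative, not the pallet's:

use std::collections::VecDeque;

struct QueuedTrie {
	keys_left: u64,
}

// Assumes weight_per_key > 0, mirroring `deletion_budget`'s per-key cost.
fn process_deletion_queue(queue: &mut VecDeque<QueuedTrie>, weight_per_key: u64, mut budget: u64) {
	while let Some(front) = queue.front_mut() {
		let affordable = budget / weight_per_key;
		let deleted = front.keys_left.min(affordable);
		front.keys_left -= deleted;
		budget -= deleted * weight_per_key;
		if front.keys_left == 0 {
			queue.pop_front(); // this trie is fully purged
		} else {
			break // out of budget; resume in a later block
		}
	}
}

This is why the tests can observe a terminated contract whose `ContractInfoOf` entry is gone while `child::get(trie, &[99])` still returns the stored value.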
- assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); assert!(!>::contains_key(&addr)); assert_matches!(child::get(trie, &[99]), Some(42)); @@ -2375,20 +1744,9 @@ fn lazy_removal_partial_remove_works() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); let info = get_contract(&addr); @@ -2399,14 +1757,7 @@ fn lazy_removal_partial_remove_works() { >::insert(&addr, info.clone()); // Terminate the contract - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); // Contract info should be gone assert!(!>::contains_key(&addr)); @@ -2457,20 +1808,9 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); let info = get_contract(&addr); let trie = &info.child_trie_info(); @@ -2479,14 +1819,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { child::put(trie, &[99], &42); // Terminate the contract - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); // Contract info should be gone assert!(!>::contains_key(&addr)); @@ -2529,20 +1862,9 @@ fn lazy_removal_does_not_use_all_weight() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); let info = get_contract(&addr); let (weight_per_key, max_keys) = ContractInfo::::deletion_budget(weight_limit); @@ -2559,14 +1881,7 @@ fn lazy_removal_does_not_use_all_weight() { >::insert(&addr, info.clone()); // Terminate the contract - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); // Contract info should be gone assert!(!>::contains_key(&addr)); @@ -2620,20 +1935,10 @@ fn deletion_queue_ring_buffer_overflow() { // add 3 contracts to the deletion queue for i in 0..3u8 { - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code.clone()), - vec![], - vec![i], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code.clone())) + .value(min_balance * 100) + 
.salt(vec![i]) + .build_and_unwrap_account_id(); let info = get_contract(&addr); let trie = &info.child_trie_info(); @@ -2643,14 +1948,7 @@ fn deletion_queue_ring_buffer_overflow() { // Terminate the contract. Contract info should be gone, but value should be still // there as the lazy removal did not run, yet. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); assert!(!>::contains_key(&addr)); assert_matches!(child::get(trie, &[99]), Some(42)); @@ -2678,87 +1976,36 @@ fn refcounter() { let min_balance = Contracts::min_balance(); // Create two contracts with the same code and check that they do in fact share it. - let addr0 = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm.clone()), - vec![], - vec![0], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - let addr1 = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm.clone()), - vec![], - vec![1], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr0 = builder::bare_instantiate(Code::Upload(wasm.clone())) + .value(min_balance * 100) + .salt(vec![0]) + .build_and_unwrap_account_id(); + let addr1 = builder::bare_instantiate(Code::Upload(wasm.clone())) + .value(min_balance * 100) + .salt(vec![1]) + .build_and_unwrap_account_id(); assert_refcount!(code_hash, 2); // Sharing should also work with the usual instantiate call - let addr2 = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Existing(code_hash), - vec![], - vec![2], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr2 = builder::bare_instantiate(Code::Existing(code_hash)) + .value(min_balance * 100) + .salt(vec![2]) + .build_and_unwrap_account_id(); assert_refcount!(code_hash, 3); - - // Terminating one contract should decrement the refcount - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr0, - 0, - GAS_LIMIT, - None, - vec![] - )); + + // Terminating one contract should decrement the refcount + assert_ok!(builder::call(addr0).build()); assert_refcount!(code_hash, 2); // remove another one - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr1, - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr1).build()); assert_refcount!(code_hash, 1); // Pristine code should still be there PristineCode::::get(code_hash).unwrap(); // remove the last contract - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr2, - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr2).build()); assert_refcount!(code_hash, 0); // refcount is `0` but code should still exists because it needs to be removed manually @@ -2772,31 +2019,10 @@ fn debug_message_works() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 30_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - let result = Contracts::bare_call( - ALICE, - addr, - 0, - GAS_LIMIT, - None, - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ); + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_account_id(); + 
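// The call below opts into debug output: the builder defaults to
// `DebugInfo::Skip`, so `result.debug_message` stays empty unless
// `.debug(DebugInfo::UnsafeDebug)` is set. A sketch of the pattern (same
// assumptions as above; the fixture used by this test prints "Hello World!"):
let result = builder::bare_call(addr.clone())
	.debug(DebugInfo::UnsafeDebug) // collect the contract's debug output
	.build();
assert_matches!(result.result, Ok(_));
// `debug_message` is a raw byte buffer; well-behaved contracts emit UTF-8.
assert_eq!(std::str::from_utf8(&result.debug_message).unwrap(), "Hello World!");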
let result = builder::bare_call(addr).debug(DebugInfo::UnsafeDebug).build(); assert_matches!(result.result, Ok(_)); assert_eq!(std::str::from_utf8(&result.debug_message).unwrap(), "Hello World!"); @@ -2809,32 +2035,11 @@ fn debug_message_logging_disabled() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 30_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_account_id(); // disable logging by passing `false` - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()).build(); assert_matches!(result.result, Ok(_)); // the dispatchables always run without debugging assert_ok!(Contracts::call(RuntimeOrigin::signed(ALICE), addr, 0, GAS_LIMIT, None, vec![])); @@ -2848,31 +2053,10 @@ fn debug_message_invalid_utf8() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 30_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - let result = Contracts::bare_call( - ALICE, - addr, - 0, - GAS_LIMIT, - None, - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ); + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_account_id(); + let result = builder::bare_call(addr).debug(DebugInfo::UnsafeDebug).build(); assert_ok!(result.result); assert!(result.debug_message.is_empty()); }); @@ -2887,50 +2071,17 @@ fn gas_estimation_for_subcalls() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 2_000 * min_balance); - let addr_caller = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(caller_code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = builder::bare_instantiate(Code::Upload(caller_code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); - let addr_dummy = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(dummy_code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_dummy = builder::bare_instantiate(Code::Upload(dummy_code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); - let addr_call_runtime = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(call_runtime_code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_call_runtime = builder::bare_instantiate(Code::Upload(call_runtime_code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); // Run the test for all of those weight limits for the subcall let weights = [ @@ -2966,17 +2117,7 @@ fn gas_estimation_for_subcalls() { .collect(); // Call in order to determine the gas that is required for this call - let result = Contracts::bare_call( - ALICE, - addr_caller.clone(), - 0, - 
GAS_LIMIT, - None, - input.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr_caller.clone()).data(input.clone()).build(); assert_ok!(&result.result); // If the out of gas happens in the subcall the caller contract @@ -2992,51 +2133,33 @@ fn gas_estimation_for_subcalls() { // Make the same call using the estimated gas. Should succeed. assert_ok!( - Contracts::bare_call( - ALICE, - addr_caller.clone(), - 0, - result.gas_required, - Some(result.storage_deposit.charge_or_zero()), - input.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result + builder::bare_call(addr_caller.clone()) + .gas_limit(result.gas_required) + .storage_deposit_limit(Some(result.storage_deposit.charge_or_zero())) + .data(input.clone()) + .build() + .result ); // Check that it fails with too little ref_time assert_err!( - Contracts::bare_call( - ALICE, - addr_caller.clone(), - 0, - result.gas_required.sub_ref_time(1), - Some(result.storage_deposit.charge_or_zero()), - input.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result, + builder::bare_call(addr_caller.clone()) + .gas_limit(result.gas_required.sub_ref_time(1)) + .storage_deposit_limit(Some(result.storage_deposit.charge_or_zero())) + .data(input.clone()) + .build() + .result, error, ); // Check that it fails with too little proof_size assert_err!( - Contracts::bare_call( - ALICE, - addr_caller.clone(), - 0, - result.gas_required.sub_proof_size(1), - Some(result.storage_deposit.charge_or_zero()), - input, - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result, + builder::bare_call(addr_caller.clone()) + .gas_limit(result.gas_required.sub_proof_size(1)) + .storage_deposit_limit(Some(result.storage_deposit.charge_or_zero())) + .data(input) + .build() + .result, error, ); } @@ -3052,20 +2175,10 @@ fn gas_estimation_call_runtime() { let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); - let addr_caller = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(caller_code), - vec![], - vec![0], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = builder::bare_instantiate(Code::Upload(caller_code)) + .value(min_balance * 100) + .salt(vec![0]) + .build_and_unwrap_account_id(); // Call something trivial with a huge gas limit so that we can observe the effects // of pre-charging. This should create a difference between consumed and required. @@ -3073,17 +2186,7 @@ fn gas_estimation_call_runtime() { pre_charge: Weight::from_parts(10_000_000, 1_000), actual_weight: Weight::from_parts(100, 100), }); - let result = Contracts::bare_call( - ALICE, - addr_caller.clone(), - 0, - GAS_LIMIT, - None, - call.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr_caller.clone()).data(call.encode()).build(); // contract encodes the result of the dispatch runtime let outcome = u32::decode(&mut result.result.unwrap().data.as_ref()).unwrap(); assert_eq!(outcome, 0); @@ -3091,18 +2194,11 @@ fn gas_estimation_call_runtime() { // Make the same call using the required gas. Should succeed. 
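// The estimation idiom above becomes a two-step dry-run-and-replay with the
// builder: call once under the generous default `GAS_LIMIT`, then replay with
// the reported requirements. A sketch, mirroring the subcall test above:
//
// Dry run to learn the actual requirements.
let result = builder::bare_call(addr_caller.clone()).data(input.clone()).build();
assert_ok!(&result.result);
// Replay with exactly the estimated gas and deposit; expected to succeed.
assert_ok!(
	builder::bare_call(addr_caller.clone())
		.gas_limit(result.gas_required)
		.storage_deposit_limit(Some(result.storage_deposit.charge_or_zero()))
		.data(input.clone())
		.build()
		.result
);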
assert_ok!( - Contracts::bare_call( - ALICE, - addr_caller, - 0, - result.gas_required, - None, - call.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result + builder::bare_call(addr_caller) + .gas_limit(result.gas_required) + .data(call.encode()) + .build() + .result ); }); } @@ -3116,35 +2212,15 @@ fn call_runtime_reentrancy_guarded() { let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); - let addr_caller = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(caller_code), - vec![], - vec![0], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = builder::bare_instantiate(Code::Upload(caller_code)) + .value(min_balance * 100) + .salt(vec![0]) + .build_and_unwrap_account_id(); - let addr_callee = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(callee_code), - vec![], - vec![1], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_callee = builder::bare_instantiate(Code::Upload(callee_code)) + .value(min_balance * 100) + .salt(vec![1]) + .build_and_unwrap_account_id(); // Call pallet_contracts call() dispatchable let call = RuntimeCall::Contracts(crate::Call::call { @@ -3157,19 +2233,9 @@ fn call_runtime_reentrancy_guarded() { // Call runtime to re-enter back to contracts engine by // calling dummy contract - let result = Contracts::bare_call( - ALICE, - addr_caller.clone(), - 0, - GAS_LIMIT, - None, - call.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr_caller.clone()) + .data(call.encode()) + .build_and_unwrap_result(); // Call to runtime should fail because of the re-entrancy guard assert_return_code!(result, RuntimeReturnCode::CallRuntimeFailed); }); @@ -3183,20 +2249,9 @@ fn ecdsa_recover() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the ecdsa_recover contract. 
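// Aside on salts: where one test instantiates the same code several times, the
// `.salt(...)` setter replaces the positional salt argument; distinct salts
// yield distinct deterministic addresses for identical code. A sketch, assuming
// an uploaded `code` blob as in the batch-removal test above:
let addr_a = builder::bare_instantiate(Code::Upload(code.clone()))
	.salt(vec![0])
	.build_and_unwrap_account_id();
let addr_b = builder::bare_instantiate(Code::Upload(code.clone()))
	.salt(vec![1])
	.build_and_unwrap_account_id();
assert_ne!(addr_a, addr_b); // same code, different addresses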
- let addr = Contracts::bare_instantiate( - ALICE, - 100_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_account_id(); #[rustfmt::skip] let signature: [u8; 65] = [ @@ -3246,17 +2301,10 @@ fn bare_instantiate_returns_events() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let result = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::UnsafeCollect, - ); + let result = builder::bare_instantiate(Code::Upload(wasm)) + .value(min_balance * 100) + .collect_events(CollectEvents::UnsafeCollect) + .build(); let events = result.events.unwrap(); assert!(!events.is_empty()); @@ -3271,17 +2319,7 @@ fn bare_instantiate_does_not_return_events() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let result = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ); + let result = builder::bare_instantiate(Code::Upload(wasm)).value(min_balance * 100).build(); let events = result.events; assert!(!System::events().is_empty()); @@ -3296,32 +2334,13 @@ fn bare_call_returns_events() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![], - DebugInfo::Skip, - CollectEvents::UnsafeCollect, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()) + .collect_events(CollectEvents::UnsafeCollect) + .build(); let events = result.events.unwrap(); assert_return_code!(&result.result.unwrap(), RuntimeReturnCode::Success); @@ -3337,32 +2356,11 @@ fn bare_call_does_not_return_events() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()).build(); let events = result.events; assert_return_code!(&result.result.unwrap(), RuntimeReturnCode::Success); @@ -3379,20 +2377,9 @@ fn sr25519_verify() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the sr25519_verify contract. 
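// Event collection follows the same opt-in scheme as debug output: the default
// is `CollectEvents::Skip`, under which `result.events` is `None`. A sketch of
// the opt-in path exercised by the two `*_returns_events` tests above (same
// assumptions as earlier):
let result = builder::bare_instantiate(Code::Upload(wasm))
	.value(min_balance * 100)
	.collect_events(CollectEvents::UnsafeCollect) // buffer events into the result
	.build();
let events = result.events.unwrap(); // would be `None` under the default
assert!(!events.is_empty());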
- let addr = Contracts::bare_instantiate( - ALICE, - 100_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_account_id(); let call_with = |message: &[u8; 11]| { // Alice's signature for "hello world" @@ -3450,34 +2437,10 @@ fn failed_deposit_charge_should_roll_back_call() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate both contracts. - let addr_caller = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_caller.clone()), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - let addr_callee = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_callee.clone()), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = builder::bare_instantiate(Code::Upload(wasm_caller.clone())) + .build_and_unwrap_account_id(); + let addr_callee = builder::bare_instantiate(Code::Upload(wasm_callee.clone())) + .build_and_unwrap_account_id(); // Give caller proxy access to Alice. assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(ALICE), addr_caller.clone(), (), 0)); @@ -3701,15 +2664,7 @@ fn remove_code_in_use() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - wasm, - vec![], - vec![], - )); + assert_ok!(builder::instantiate_with_code(wasm).build()); // Drop previous events initialize_block(2); @@ -3753,20 +2708,7 @@ fn instantiate_with_zero_balance_works() { initialize_block(2); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); // Ensure the contract was stored and get expected deposit amount to be reserved. let deposit_expected = expected_deposit(ensure_stored(code_hash)); @@ -3850,20 +2792,9 @@ fn instantiate_with_below_existential_deposit_works() { initialize_block(2); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - value, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(value) + .build_and_unwrap_account_id(); // Ensure the contract was stored and get expected deposit amount to be reserved. 
let deposit_expected = expected_deposit(ensure_stored(code_hash)); @@ -3949,20 +2880,7 @@ fn storage_deposit_works() { ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let mut deposit = test_utils::contract_info_storage_deposit(&addr); @@ -3970,41 +2888,23 @@ fn storage_deposit_works() { initialize_block(2); // Create storage - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 42, - GAS_LIMIT, - None, - (1_000u32, 5_000u32).encode(), - )); + assert_ok!(builder::call(addr.clone()) + .value(42) + .data((1_000u32, 5_000u32).encode()) + .build()); // 4 is for creating 2 storage items let charged0 = 4 + 1_000 + 5_000; deposit += charged0; assert_eq!(get_contract(&addr).total_deposit(), deposit); - - // Add more storage (but also remove some) - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - (2_000u32, 4_900u32).encode(), - )); + + // Add more storage (but also remove some) + assert_ok!(builder::call(addr.clone()).data((2_000u32, 4_900u32).encode()).build()); let charged1 = 1_000 - 100; deposit += charged1; assert_eq!(get_contract(&addr).total_deposit(), deposit); // Remove more storage (but also add some) - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - (2_100u32, 900u32).encode(), - )); + assert_ok!(builder::call(addr.clone()).data((2_100u32, 900u32).encode()).build()); // -1 for numeric instability let refunded0 = 4_000 - 100 - 1; deposit -= refunded0; @@ -4093,43 +2993,12 @@ fn storage_deposit_callee_works() { let min_balance = Contracts::min_balance(); // Create both contracts: Constructors do nothing. 
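// Setters chain freely because each one takes `self` by value and returns
// `Self` (see the generated builders in `tests/builder.rs` below), so value
// transfer and input data compose in a single expression, as in the
// storage-deposit test above:
assert_ok!(builder::call(addr.clone())
	.value(42)                           // endowment sent along with the call
	.data((1_000u32, 5_000u32).encode()) // SCALE-encoded contract input
	.build());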
- let addr_caller = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_caller), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - let addr_callee = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_callee), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = + builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_account_id(); + let addr_callee = + builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_account_id(); - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller, - 0, - GAS_LIMIT, - None, - (100u32, &addr_callee).encode() - )); + assert_ok!(builder::call(addr_caller).data((100u32, &addr_callee).encode()).build()); let callee = get_contract(&addr_callee); let deposit = DepositPerByte::get() * 100 + DepositPerItem::get() * 1; @@ -4152,20 +3021,7 @@ fn set_code_extrinsic() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), @@ -4189,7 +3045,7 @@ fn set_code_extrinsic() { assert_eq!(get_contract(&addr).code_hash, code_hash); assert_refcount!(&code_hash, 1); assert_refcount!(&new_code_hash, 0); - assert_eq!(System::events(), vec![],); + assert_eq!(System::events(), vec![]); // contract must exist assert_noop!( @@ -4199,7 +3055,7 @@ fn set_code_extrinsic() { assert_eq!(get_contract(&addr).code_hash, code_hash); assert_refcount!(&code_hash, 1); assert_refcount!(&new_code_hash, 0); - assert_eq!(System::events(), vec![],); + assert_eq!(System::events(), vec![]); // new code hash must exist assert_noop!( @@ -4209,7 +3065,7 @@ fn set_code_extrinsic() { assert_eq!(get_contract(&addr).code_hash, code_hash); assert_refcount!(&code_hash, 1); assert_refcount!(&new_code_hash, 0); - assert_eq!(System::events(), vec![],); + assert_eq!(System::events(), vec![]); // successful call assert_ok!(Contracts::set_code(RuntimeOrigin::root(), addr.clone(), new_code_hash)); @@ -4239,20 +3095,9 @@ fn slash_cannot_kill_account() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); let min_balance = Contracts::min_balance(); - let addr = Contracts::bare_instantiate( - ALICE, - value, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(value) + .build_and_unwrap_account_id(); // Drop previous events initialize_block(2); @@ -4303,29 +3148,13 @@ fn contract_reverted() { // Calling extrinsic: revert leads to an error assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code_hash, - input.clone(), - vec![], - ), + builder::instantiate(code_hash).data(input.clone()).build(), >::ContractReverted, ); // Calling extrinsic: revert leads to an error assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - wasm, - input.clone(), - vec![], - ), + 
builder::instantiate_with_code(wasm).data(input.clone()).build(), >::ContractReverted, ); @@ -4333,66 +3162,26 @@ fn contract_reverted() { // This is just a different way of transporting the error that allows the read out // the `data` which is only there on success. Obviously, the contract isn't // instantiated. - let result = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Existing(code_hash), - input.clone(), - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap(); + let result = builder::bare_instantiate(Code::Existing(code_hash)) + .data(input.clone()) + .build_and_unwrap_result(); assert_eq!(result.result.flags, flags); assert_eq!(result.result.data, buffer); assert!(!>::contains_key(result.account_id)); // Pass empty flags and therefore successfully instantiate the contract for later use. - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Existing(code_hash), - ReturnFlags::empty().bits().encode(), - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Existing(code_hash)) + .data(ReturnFlags::empty().bits().encode()) + .build_and_unwrap_account_id(); // Calling extrinsic: revert leads to an error assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - input.clone() - ), + builder::call(addr.clone()).data(input.clone()).build(), >::ContractReverted, ); // Calling directly: revert leads to success but the flags indicate the error - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - input, - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr.clone()).data(input).build_and_unwrap_result(); assert_eq!(result.flags, flags); assert_eq!(result.data, buffer); }); @@ -4407,20 +3196,9 @@ fn set_code_hash() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the 'caller' - let contract_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let contract_addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(300_000) + .build_and_unwrap_account_id(); // upload new code assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), @@ -4432,35 +3210,16 @@ fn set_code_hash() { System::reset_events(); // First call sets new code_hash and returns 1 - let result = Contracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - GAS_LIMIT, - None, - new_code_hash.as_ref().to_vec(), - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(contract_addr.clone()) + .data(new_code_hash.as_ref().to_vec()) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); assert_return_code!(result, 1); // Second calls new contract code that returns 2 - let result = Contracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - GAS_LIMIT, - None, - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(contract_addr.clone()) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); assert_return_code!(result, 2); // Checking for the last event only @@ -4512,37 +3271,16 @@ fn storage_deposit_limit_is_enforced() { // Setting 
insufficient storage_deposit should fail. assert_err!( - Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - Some((2 * min_balance + 3 - 1).into()), /* expected deposit is 2 * ed + 3 for - * the call */ - Code::Upload(wasm.clone()), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result, + builder::bare_instantiate(Code::Upload(wasm.clone())) + // expected deposit is 2 * ed + 3 for the call + .storage_deposit_limit(Some((2 * min_balance + 3 - 1).into())) + .build() + .result, >::StorageDepositLimitExhausted, ); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let info_deposit = test_utils::contract_info_storage_deposit(&addr); // Check that the BOB contract has been instantiated and has the minimum balance @@ -4553,14 +3291,10 @@ fn storage_deposit_limit_is_enforced() { // setting insufficient deposit limit, as it requires 3 Balance: // 2 for the item added + 1 for the new storage item. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(2)), - 1u32.to_le_bytes().to_vec() - ), + builder::call(addr.clone()) + .storage_deposit_limit(Some(codec::Compact(2))) + .data(1u32.to_le_bytes().to_vec()) + .build(), >::StorageDepositLimitExhausted, ); @@ -4571,26 +3305,12 @@ fn storage_deposit_limit_is_enforced() { // Create 1 byte of storage, should cost 3 Balance: // 2 for the item added + 1 for the new storage item. // Should pass as it fallbacks to DefaultDepositLimit. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - 1u32.to_le_bytes().to_vec() - )); + assert_ok!(builder::call(addr.clone()).data(1u32.to_le_bytes().to_vec()).build()); // Use 4 more bytes of the storage for the same item, which requires 4 Balance. // Should fail as DefaultDepositLimit is 3 and hence isn't enough. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - 5u32.to_le_bytes().to_vec() - ), + builder::call(addr.clone()).data(5u32.to_le_bytes().to_vec()).build(), >::StorageDepositLimitExhausted, ); }); @@ -4605,45 +3325,17 @@ fn deposit_limit_in_nested_calls() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Create both contracts: Constructors do nothing. 
- let addr_caller = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_caller), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - let addr_callee = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_callee), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = + builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_account_id(); + let addr_callee = + builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_account_id(); // Create 100 bytes of storage with a price of per byte // This is 100 Balance + 2 Balance for the item - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_callee.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(102)), - 100u32.to_le_bytes().to_vec() - )); + assert_ok!(builder::call(addr_callee.clone()) + .storage_deposit_limit(Some(codec::Compact(102))) + .data(100u32.to_le_bytes().to_vec()) + .build()); // We do not remove any storage but add a storage item of 12 bytes in the caller // contract. This would cost 12 + 2 = 14 Balance. @@ -4651,14 +3343,10 @@ fn deposit_limit_in_nested_calls() { // This should fail as the specified parent's limit is less than the cost: 13 < // 14. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(13)), - (100u32, &addr_callee, 0u64).encode(), - ), + builder::call(addr_caller.clone()) + .storage_deposit_limit(Some(codec::Compact(13))) + .data((100u32, &addr_callee, 0u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); // Now we specify the parent's limit high enough to cover the caller's storage additions. @@ -4668,14 +3356,10 @@ fn deposit_limit_in_nested_calls() { // This should fail as the specified parent's limit is less than the cost: 14 // < 15. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(14)), - (101u32, &addr_callee, 0u64).encode(), - ), + builder::call(addr_caller.clone()) + .storage_deposit_limit(Some(codec::Compact(14))) + .data((101u32, &addr_callee, 0u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); @@ -4685,14 +3369,10 @@ fn deposit_limit_in_nested_calls() { // call should have a deposit limit of at least 2 Balance. The sub-call should be rolled // back, which is covered by the next test case. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(16)), - (102u32, &addr_callee, 1u64).encode(), - ), + builder::call(addr_caller.clone()) + .storage_deposit_limit(Some(codec::Compact(16))) + .data((102u32, &addr_callee, 1u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); @@ -4700,14 +3380,10 @@ fn deposit_limit_in_nested_calls() { // caller. Note that if previous sub-call wouldn't roll back, this call would pass making // the test case fail. We don't set a special limit for the nested call here. 
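// Note the two deposit-limit representations in this test: the extrinsic
// builder mirrors the dispatchable signature and takes a compact-encoded
// `Some(codec::Compact(..))`, while the bare-call builder takes the balance
// type, hence the `.into()` conversion seen further down. A sketch of both
// forms (the limit value is arbitrary):
let _ = builder::call(addr_caller.clone())
	.storage_deposit_limit(Some(codec::Compact(13))) // compact-encoded limit
	.data((100u32, &addr_callee, 0u64).encode())
	.build();
let _ = builder::bare_call(addr_caller.clone())
	.storage_deposit_limit(Some(codec::Compact(13).into())) // converted limit
	.data((100u32, &addr_callee, 0u64).encode())
	.build();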
assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(0)), - (87u32, &addr_callee, 0u64).encode(), - ), + builder::call(addr_caller.clone()) + .storage_deposit_limit(Some(codec::Compact(0))) + .data((87u32, &addr_callee, 0u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); @@ -4716,28 +3392,19 @@ fn deposit_limit_in_nested_calls() { // Require more than the sender's balance. // We don't set a special limit for the nested call. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - None, - (1200u32, &addr_callee, 1u64).encode(), - ), + builder::call(addr_caller.clone()) + .data((1200u32, &addr_callee, 1u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); // Same as above but allow for the additional deposit of 1 Balance in parent. // We set the special deposit limit of 1 Balance for the nested call, which isn't // enforced as callee frees up storage. This should pass. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(1)), - (87u32, &addr_callee, 1u64).encode(), - )); + assert_ok!(builder::call(addr_caller.clone()) + .storage_deposit_limit(Some(codec::Compact(1))) + .data((87u32, &addr_callee, 1u64).encode()) + .build()); }); } @@ -4751,35 +3418,13 @@ fn deposit_limit_in_nested_instantiate() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); let _ = ::Currency::set_balance(&BOB, 1_000_000); // Create caller contract - let addr_caller = Contracts::bare_instantiate( - ALICE, - 10_000u64, // this balance is later passed to the deployed contract - GAS_LIMIT, - None, - Code::Upload(wasm_caller), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = builder::bare_instantiate(Code::Upload(wasm_caller)) + .value(10_000u64) // this balance is later passed to the deployed contract + .build_and_unwrap_account_id(); // Deploy a contract to get its occupied storage size - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_callee), - vec![0, 0, 0, 0], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm_callee)) + .data(vec![0, 0, 0, 0]) + .build_and_unwrap_account_id(); let callee_info_len = ContractInfoOf::::get(&addr).unwrap().encoded_size() as u64; @@ -4795,14 +3440,11 @@ fn deposit_limit_in_nested_instantiate() { // Provided the limit is set to be 1 Balance less, // this call should fail on the return from the caller contract. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 1)), - (0u32, &code_hash_callee, 0u64).encode(), - ), + builder::call(addr_caller.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 1))) + .data((0u32, &code_hash_callee, 0u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); // The charges made on instantiation should be rolled back. @@ -4812,14 +3454,11 @@ fn deposit_limit_in_nested_instantiate() { // byte in the constructor. Hence +1 Balance to the limit is needed. This should fail on the // return from constructor. 
assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 2)), - (1u32, &code_hash_callee, 0u64).encode(), - ), + builder::call(addr_caller.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 2))) + .data((1u32, &code_hash_callee, 0u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); // The charges made on the instantiation should be rolled back. @@ -4829,14 +3468,11 @@ fn deposit_limit_in_nested_instantiate() { // This should fail during the charging for the instantiation in // `RawMeter::charge_instantiate()` assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 2)), - (0u32, &code_hash_callee, callee_info_len + 2 + ED + 1).encode(), - ), + builder::call(addr_caller.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 2))) + .data((0u32, &code_hash_callee, callee_info_len + 2 + ED + 1).encode()) + .build(), >::StorageDepositLimitExhausted, ); // The charges made on the instantiation should be rolled back. @@ -4847,31 +3483,22 @@ fn deposit_limit_in_nested_instantiate() { // Now we set enough limit for the parent call, but insufficient limit for child // instantiate. This should fail right after the constructor execution. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 3)), // enough parent limit - (1u32, &code_hash_callee, callee_info_len + 2 + ED + 2).encode(), - ), + builder::call(addr_caller.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 3))) // enough parent limit + .data((1u32, &code_hash_callee, callee_info_len + 2 + ED + 2).encode()) + .build(), >::StorageDepositLimitExhausted, ); // The charges made on the instantiation should be rolled back. assert_eq!(::Currency::free_balance(&BOB), 1_000_000); // Set enough deposit limit for the child instantiate. This should succeed. - let result = Contracts::bare_call( - BOB, - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 4).into()), - (1u32, &code_hash_callee, callee_info_len + 2 + ED + 3).encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr_caller.clone()) + .origin(BOB) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 4).into())) + .data((1u32, &code_hash_callee, callee_info_len + 2 + ED + 3).encode()) + .build(); let returned = result.result.unwrap(); // All balance of the caller except ED has been transferred to the callee. @@ -4891,7 +3518,7 @@ fn deposit_limit_in_nested_instantiate() { 1_000_000 - (callee_info_len + 2 + ED + 3) ); // Check that deposit due to be charged still includes these 3 Balance - assert_eq!(result.storage_deposit.charge_or_zero(), (callee_info_len + 2 + ED + 3),) + assert_eq!(result.storage_deposit.charge_or_zero(), (callee_info_len + 2 + ED + 3)) }); } @@ -4905,20 +3532,7 @@ fn deposit_limit_honors_liquidity_restrictions() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. 
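// The origin defaults to `ALICE` in every builder, so only tests exercising a
// different caller override it. The hunks above also preserve the asymmetry of
// the underlying API: dispatchables take a `RuntimeOrigin`, bare calls a plain
// account id. Sketch:
let _ = builder::call(addr_caller.clone())
	.origin(RuntimeOrigin::signed(BOB)) // dispatch origin
	.build();
let _ = builder::bare_call(addr_caller.clone())
	.origin(BOB) // plain account id
	.build();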
- let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let info_deposit = test_utils::contract_info_storage_deposit(&addr); // Check that the contract has been instantiated and has the minimum balance @@ -4933,14 +3547,11 @@ fn deposit_limit_honors_liquidity_restrictions() { ) .unwrap(); assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(200)), - 100u32.to_le_bytes().to_vec() - ), + builder::call(addr.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(200))) + .data(100u32.to_le_bytes().to_vec()) + .build(), >::StorageDepositNotEnoughFunds, ); assert_eq!(::Currency::free_balance(&BOB), min_balance); @@ -4956,20 +3567,7 @@ fn deposit_limit_honors_existential_deposit() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let info_deposit = test_utils::contract_info_storage_deposit(&addr); @@ -4979,14 +3577,11 @@ fn deposit_limit_honors_existential_deposit() { // check that the deposit can't bring the account below the existential deposit assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(900)), - 100u32.to_le_bytes().to_vec() - ), + builder::call(addr.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(900))) + .data(100u32.to_le_bytes().to_vec()) + .build(), >::StorageDepositNotEnoughFunds, ); assert_eq!(::Currency::free_balance(&BOB), 1_000); @@ -5002,20 +3597,7 @@ fn deposit_limit_honors_min_leftover() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. 
- let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let info_deposit = test_utils::contract_info_storage_deposit(&addr); @@ -5026,14 +3608,12 @@ fn deposit_limit_honors_min_leftover() { // check that the minimum leftover (value send) is considered assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr.clone(), - 400, - GAS_LIMIT, - Some(codec::Compact(500)), - 100u32.to_le_bytes().to_vec() - ), + builder::call(addr.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .value(400) + .storage_deposit_limit(Some(codec::Compact(500))) + .data(100u32.to_le_bytes().to_vec()) + .build(), >::StorageDepositNotEnoughFunds, ); assert_eq!(::Currency::free_balance(&BOB), 1_000); @@ -5069,34 +3649,15 @@ fn cannot_instantiate_indeterministic_code() { // Try to instantiate directly from code assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - wasm.clone(), - vec![], - vec![], - ), + builder::instantiate_with_code(wasm.clone()).build(), >::CodeRejected, ); assert_err!( - Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm.clone()), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result, + builder::bare_instantiate(Code::Upload(wasm.clone())).build().result, >::CodeRejected, ); - // Try to upload a non deterministic code as deterministic + // Try to upload a non-deterministic code as deterministic assert_err!( Contracts::upload_code( RuntimeOrigin::signed(ALICE), @@ -5116,48 +3677,17 @@ fn cannot_instantiate_indeterministic_code() { )); assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).build(), >::Indeterministic, ); assert_err!( - Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Existing(code_hash), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result, + builder::bare_instantiate(Code::Existing(code_hash)).build().result, >::Indeterministic, ); // Deploy contract which instantiates another contract - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = + builder::bare_instantiate(Code::Upload(caller_wasm)).build_and_unwrap_account_id(); // Try to instantiate `code_hash` from another contract in deterministic mode assert_err!( @@ -5176,7 +3706,7 @@ fn cannot_instantiate_indeterministic_code() { >::Indeterministic, ); - // Instantiations are not allowed even in non determinism mode + // Instantiations are not allowed even in non-determinism mode assert_err!( >::bare_call( ALICE, @@ -5202,7 +3732,7 @@ fn cannot_set_code_indeterministic_code() { ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Put the non deterministic contract on-chain + // Put the non-deterministic contract on-chain assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), wasm, @@ -5211,22 +3741,10 @@ fn cannot_set_code_indeterministic_code() { )); // Create the contract that will call `seal_set_code_hash` - let caller_addr = 
Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let caller_addr = + builder::bare_instantiate(Code::Upload(caller_wasm)).build_and_unwrap_account_id(); - // We do not allow to set the code hash to a non determinstic wasm + // We do not allow to set the code hash to a non-deterministic wasm assert_err!( >::bare_call( ALICE, @@ -5252,7 +3770,7 @@ fn delegate_call_indeterministic_code() { ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Put the non deterministic contract on-chain + // Put the non-deterministic contract on-chain assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), wasm, @@ -5261,20 +3779,8 @@ fn delegate_call_indeterministic_code() { )); // Create the contract that will call `seal_delegate_call` - let caller_addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let caller_addr = + builder::bare_instantiate(Code::Upload(caller_wasm)).build_and_unwrap_account_id(); // The delegate call will fail in deterministic mode assert_err!( @@ -5293,7 +3799,7 @@ fn delegate_call_indeterministic_code() { >::Indeterministic, ); - // The delegate call will work on non deterministic mode + // The delegate call will work on non-deterministic mode assert_ok!( >::bare_call( ALICE, @@ -5331,17 +3837,9 @@ fn locking_delegate_dependency_works() { // Instantiate the caller contract with the given input. let instantiate = |input: &(u32, H256)| { - Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_caller.clone()), - input.encode(), - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) + builder::bare_instantiate(Code::Upload(wasm_caller.clone())) + .data(input.encode()) + .build() }; // Call contract with the given input. @@ -5429,7 +3927,7 @@ fn locking_delegate_dependency_works() { contract.storage_base_deposit() - ED ); - // Removing an unexisting dependency should fail. + // Removing a nonexistent dependency should fail. assert_err!( call(&addr_caller, &unlock_delegate_dependency_input).result, Error::::DelegateDependencyNotFound @@ -5509,17 +4007,7 @@ fn native_dependency_deposit_works() { }; // Instantiate the set_code_hash contract. 
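// `build()` hands back the full result struct, which matters when a test
// asserts on more than the account id (storage deposit, gas, events); the
// `build_and_unwrap_*` helpers are shortcuts for the happy path only. A
// sketch, assuming an uploaded `wasm` blob as elsewhere:
//
// Keep the whole instantiate result when its other fields are inspected.
let res = builder::bare_instantiate(Code::Upload(wasm.clone())).build();
let addr = res.result.unwrap().account_id;
// Equivalent shortcut when only the address is needed (different salt to
// avoid colliding with the address instantiated above).
let addr2 = builder::bare_instantiate(Code::Upload(wasm.clone()))
	.salt(vec![1])
	.build_and_unwrap_account_id();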
- let res = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - code, - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ); + let res = builder::bare_instantiate(code).build(); let addr = res.result.unwrap().account_id; let base_deposit = ED + test_utils::contract_info_storage_deposit(&addr); @@ -5567,37 +4055,17 @@ fn reentrance_count_works_with_call() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let contract_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let contract_addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(300_000) + .build_and_unwrap_account_id(); // passing reentrant count to the input let input = 0.encode(); - Contracts::bare_call( - ALICE, - contract_addr, - 0, - GAS_LIMIT, - None, - input, - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + builder::bare_call(contract_addr) + .data(input) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); }); } @@ -5608,37 +4076,17 @@ fn reentrance_count_works_with_delegated_call() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let contract_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let contract_addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(300_000) + .build_and_unwrap_account_id(); // adding a callstack height to the input let input = (code_hash, 1).encode(); - Contracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - GAS_LIMIT, - None, - input, - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + builder::bare_call(contract_addr.clone()) + .data(input) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); }); } @@ -5651,63 +4099,23 @@ fn account_reentrance_count_works() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let contract_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let contract_addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(300_000) + .build_and_unwrap_account_id(); - let another_contract_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(wasm_reentrance_count), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let another_contract_addr = builder::bare_instantiate(Code::Upload(wasm_reentrance_count)) + .value(300_000) + .build_and_unwrap_account_id(); - let result1 = Contracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - GAS_LIMIT, - None, - contract_addr.encode(), - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result1 = builder::bare_call(contract_addr.clone()) + .data(contract_addr.encode()) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); - let result2 = Contracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - GAS_LIMIT, - None, - 
another_contract_addr.encode(), - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result2 = builder::bare_call(contract_addr.clone()) + .data(another_contract_addr.encode()) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); assert_eq!(result1.data, 1.encode()); assert_eq!(result2.data, 0.encode()); @@ -5754,7 +4162,7 @@ fn signed_cannot_set_code() { fn none_cannot_call_code() { ExtBuilder::default().build().execute_with(|| { assert_noop!( - Contracts::call(RuntimeOrigin::none(), BOB, 0, GAS_LIMIT, None, Vec::new()), + builder::call(BOB).origin(RuntimeOrigin::none()).build(), DispatchError::BadOrigin, ); }); @@ -5767,30 +4175,10 @@ fn root_can_call() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); // Call the contract. - assert_ok!(Contracts::call( - RuntimeOrigin::root(), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).origin(RuntimeOrigin::root()).build()); }); } @@ -5800,15 +4188,7 @@ fn root_cannot_instantiate_with_code() { ExtBuilder::default().build().execute_with(|| { assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::root(), - 0, - GAS_LIMIT, - None, - wasm, - vec![], - vec![], - ), + builder::instantiate_with_code(wasm).origin(RuntimeOrigin::root()).build(), DispatchError::BadOrigin ); }); @@ -5820,15 +4200,7 @@ fn root_cannot_instantiate() { ExtBuilder::default().build().execute_with(|| { assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::root(), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).origin(RuntimeOrigin::root()).build(), DispatchError::BadOrigin ); }); @@ -5881,53 +4253,25 @@ fn only_instantiation_origin_can_instantiate() { let _ = Balances::set_balance(&BOB, 1_000_000); assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::root(), - 0, - GAS_LIMIT, - None, - code.clone(), - vec![], - vec![], - ), + builder::instantiate_with_code(code.clone()) + .origin(RuntimeOrigin::root()) + .build(), DispatchError::BadOrigin ); assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(BOB), - 0, - GAS_LIMIT, - None, - code.clone(), - vec![], - vec![], - ), + builder::instantiate_with_code(code.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .build(), DispatchError::BadOrigin ); // Only Alice can instantiate - assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code, - vec![], - vec![], - ),); + assert_ok!(builder::instantiate_with_code(code).build()); // Bob cannot instantiate with either `instantiate_with_code` or `instantiate`. assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::signed(BOB), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).origin(RuntimeOrigin::signed(BOB)).build(), DispatchError::BadOrigin ); }); @@ -5940,44 +4284,18 @@ fn balance_api_returns_free_balance() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the BOB contract without any extra balance. 
@@ -5940,44 +4284,18 @@ fn balance_api_returns_free_balance() {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);

 		// Instantiate the BOB contract without any extra balance.
-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			0,
-			GAS_LIMIT,
-			None,
-			Code::Upload(wasm.to_vec()),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr =
+			builder::bare_instantiate(Code::Upload(wasm.to_vec())).build_and_unwrap_account_id();

 		let value = 0;
 		// Call BOB which makes it call the balance runtime API.
 		// The contract code asserts that the returned balance is 0.
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr.clone(),
-			value,
-			GAS_LIMIT,
-			None,
-			vec![]
-		));
+		assert_ok!(builder::call(addr.clone()).value(value).build());

 		let value = 1;
 		// Calling with value will trap the contract.
 		assert_err_ignore_postinfo!(
-			Contracts::call(
-				RuntimeOrigin::signed(ALICE),
-				addr.clone(),
-				value,
-				GAS_LIMIT,
-				None,
-				vec![]
-			),
+			builder::call(addr.clone()).value(value).build(),
 			<Error<Test>>::ContractTrapped
 		);
 	});
@@ -5989,37 +4307,14 @@ fn gas_consumed_is_linear_for_nested_calls() {
 	ExtBuilder::default().existential_deposit(200).build().execute_with(|| {
 		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);

-		let addr = Contracts::bare_instantiate(
-			ALICE,
-			0,
-			GAS_LIMIT,
-			None,
-			Code::Upload(code),
-			vec![],
-			vec![],
-			DebugInfo::Skip,
-			CollectEvents::Skip,
-		)
-		.result
-		.unwrap()
-		.account_id;
+		let addr = builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_account_id();

 		let max_call_depth = <Test as Config>::CallStack::size() as u32;
 		let [gas_0, gas_1, gas_2, gas_max] = {
 			[0u32, 1u32, 2u32, max_call_depth]
 				.iter()
 				.map(|i| {
-					let result = Contracts::bare_call(
-						ALICE,
-						addr.clone(),
-						0,
-						GAS_LIMIT,
-						None,
-						i.encode(),
-						DebugInfo::Skip,
-						CollectEvents::Skip,
-						Determinism::Enforced,
-					);
+					let result = builder::bare_call(addr.clone()).data(i.encode()).build();
 					assert_ok!(result.result);
 					result.gas_consumed
 				})
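The new `tests/builder.rs` module below generates one builder per pallet entry point. Each builder offers `build()` for the raw result plus `build_and_unwrap_*` helpers that remove the `.result.unwrap()` noise. An illustrative sketch of the three call styles, using names as defined in the file that follows (the `output` binding is only for illustration):

	// Keep the raw result when the test inspects deposits, gas or events.
	let res = builder::bare_instantiate(code).build();

	// Unwrap straight to the account id when that is all that matters.
	let addr = builder::bare_instantiate(Code::Upload(wasm))
		.value(300_000)
		.build_and_unwrap_account_id();

	// Unwrap straight to the execution output.
	let output = builder::bare_call(addr).data(input).build_and_unwrap_result();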
diff --git a/substrate/frame/contracts/src/tests/builder.rs b/substrate/frame/contracts/src/tests/builder.rs
new file mode 100644
index 000000000000..08d12503a290
--- /dev/null
+++ b/substrate/frame/contracts/src/tests/builder.rs
@@ -0,0 +1,219 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::{AccountId32, Test, ALICE, GAS_LIMIT};
+use crate::{
+	tests::RuntimeOrigin, AccountIdLookupOf, AccountIdOf, BalanceOf, Code, CodeHash, CollectEvents,
+	ContractExecResult, ContractInstantiateResult, DebugInfo, Determinism, EventRecordOf,
+	ExecReturnValue, OriginFor, Pallet, Weight,
+};
+use codec::Compact;
+use frame_support::pallet_prelude::DispatchResultWithPostInfo;
+use paste::paste;
+
+/// Helper macro to generate a builder for contract API calls.
+macro_rules! builder {
+	// Entry point to generate a builder for the given method.
+	(
+		$method:ident($($field:ident: $type:ty,)*) -> $result:ty
+	) => {
+		paste!{
+			builder!([< $method:camel Builder >], $method($($field: $type,)* ) -> $result);
+		}
+	};
+	// Generate the builder struct and its methods.
+	(
+		$name:ident,
+		$method:ident(
+			$($field:ident: $type:ty,)*
+		) -> $result:ty
+	) => {
+		#[doc = concat!("A builder to construct a ", stringify!($method), " call")]
+		pub struct $name {
+			$($field: $type,)*
+		}

+		#[allow(dead_code)]
+		impl $name
+		{
+			$(
+				#[doc = concat!("Set the ", stringify!($field))]
+				pub fn $field(mut self, value: $type) -> Self {
+					self.$field = value;
+					self
+				}
+			)*

+			#[doc = concat!("Build the ", stringify!($method), " call")]
+			pub fn build(self) -> $result {
+				Pallet::<Test>::$method(
+					$(self.$field,)*
+				)
+			}
+		}
+	}
+}
+
+builder!(
+	instantiate_with_code(
+		origin: OriginFor<Test>,
+		value: BalanceOf<Test>,
+		gas_limit: Weight,
+		storage_deposit_limit: Option<Compact<BalanceOf<Test>>>,
+		code: Vec<u8>,
+		data: Vec<u8>,
+		salt: Vec<u8>,
+	) -> DispatchResultWithPostInfo
+);
+
+builder!(
+	instantiate(
+		origin: OriginFor<Test>,
+		value: BalanceOf<Test>,
+		gas_limit: Weight,
+		storage_deposit_limit: Option<Compact<BalanceOf<Test>>>,
+		code_hash: CodeHash<Test>,
+		data: Vec<u8>,
+		salt: Vec<u8>,
+	) -> DispatchResultWithPostInfo
+);
+
+builder!(
+	bare_instantiate(
+		origin: AccountIdOf<Test>,
+		value: BalanceOf<Test>,
+		gas_limit: Weight,
+		storage_deposit_limit: Option<BalanceOf<Test>>,
+		code: Code<CodeHash<Test>>,
+		data: Vec<u8>,
+		salt: Vec<u8>,
+		debug: DebugInfo,
+		collect_events: CollectEvents,
+	) -> ContractInstantiateResult<AccountIdOf<Test>, BalanceOf<Test>, EventRecordOf<Test>>
+);
+
+builder!(
+	call(
+		origin: OriginFor<Test>,
+		dest: AccountIdLookupOf<Test>,
+		value: BalanceOf<Test>,
+		gas_limit: Weight,
+		storage_deposit_limit: Option<Compact<BalanceOf<Test>>>,
+		data: Vec<u8>,
+	) -> DispatchResultWithPostInfo
+);
+
+builder!(
+	bare_call(
+		origin: AccountIdOf<Test>,
+		dest: AccountIdOf<Test>,
+		value: BalanceOf<Test>,
+		gas_limit: Weight,
+		storage_deposit_limit: Option<BalanceOf<Test>>,
+		data: Vec<u8>,
+		debug: DebugInfo,
+		collect_events: CollectEvents,
+		determinism: Determinism,
+	) -> ContractExecResult<BalanceOf<Test>, EventRecordOf<Test>>
+);
+
+/// Create a [`BareInstantiateBuilder`] with default values.
+pub fn bare_instantiate(code: Code<CodeHash<Test>>) -> BareInstantiateBuilder {
+	BareInstantiateBuilder {
+		origin: ALICE,
+		value: 0,
+		gas_limit: GAS_LIMIT,
+		storage_deposit_limit: None,
+		code,
+		data: vec![],
+		salt: vec![],
+		debug: DebugInfo::Skip,
+		collect_events: CollectEvents::Skip,
+	}
+}
+
+impl BareInstantiateBuilder {
+	/// Build the instantiate call and unwrap the result.
+	pub fn build_and_unwrap_result(self) -> crate::InstantiateReturnValue<AccountIdOf<Test>> {
+		self.build().result.unwrap()
+	}

+	/// Build the instantiate call and unwrap the account id.
+	pub fn build_and_unwrap_account_id(self) -> AccountIdOf<Test> {
+		self.build().result.unwrap().account_id
+	}
+}
+
+/// Create a [`BareCallBuilder`] with default values.
+pub fn bare_call(dest: AccountId32) -> BareCallBuilder {
+	BareCallBuilder {
+		origin: ALICE,
+		dest,
+		value: 0,
+		gas_limit: GAS_LIMIT,
+		storage_deposit_limit: None,
+		data: vec![],
+		debug: DebugInfo::Skip,
+		collect_events: CollectEvents::Skip,
+		determinism: Determinism::Enforced,
+	}
+}
+
+impl BareCallBuilder {
+	/// Build the call and unwrap the result.
+	pub fn build_and_unwrap_result(self) -> ExecReturnValue {
+		self.build().result.unwrap()
+	}
+}
+
+/// Create an [`InstantiateWithCodeBuilder`] with default values.
+pub fn instantiate_with_code(code: Vec<u8>) -> InstantiateWithCodeBuilder {
+	InstantiateWithCodeBuilder {
+		origin: RuntimeOrigin::signed(ALICE),
+		value: 0,
+		gas_limit: GAS_LIMIT,
+		storage_deposit_limit: None,
+		code,
+		data: vec![],
+		salt: vec![],
+	}
+}
+
+/// Create an [`InstantiateBuilder`] with default values.
+pub fn instantiate(code_hash: CodeHash<Test>) -> InstantiateBuilder {
+	InstantiateBuilder {
+		origin: RuntimeOrigin::signed(ALICE),
+		value: 0,
+		gas_limit: GAS_LIMIT,
+		storage_deposit_limit: None,
+		code_hash,
+		data: vec![],
+		salt: vec![],
+	}
+}
+
+/// Create a [`CallBuilder`] with default values.
+pub fn call(dest: AccountIdLookupOf<Test>) -> CallBuilder {
+	CallBuilder {
+		origin: RuntimeOrigin::signed(ALICE),
+		dest,
+		value: 0,
+		gas_limit: GAS_LIMIT,
+		storage_deposit_limit: None,
+		data: vec![],
+	}
+}
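For reviewers unfamiliar with the `paste` crate: `[< $method:camel Builder >]` camel-cases the method name, so `builder!(bare_call(...))` emits a `BareCallBuilder`, `builder!(call(...))` a `CallBuilder`, and so on. A hand-expanded sketch of what the macro generates, abridged here to two fields (the real expansion covers every parameter in declaration order):

	pub struct BareCallBuilder {
		dest: AccountIdOf<Test>,
		value: BalanceOf<Test>,
		// ...one field per `bare_call` parameter...
	}

	#[allow(dead_code)]
	impl BareCallBuilder {
		/// Set the value.
		pub fn value(mut self, value: BalanceOf<Test>) -> Self {
			self.value = value;
			self
		}

		/// Build the bare_call call. Fields are forwarded positionally,
		/// so their declaration order must match the pallet method's signature.
		pub fn build(self) -> ContractExecResult<BalanceOf<Test>, EventRecordOf<Test>> {
			Pallet::<Test>::bare_call(/* self.origin, self.dest, self.value, ... */)
		}
	}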
diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs
index 0ffe561e76ed..e24a6e0bc07c 100644
--- a/substrate/frame/contracts/src/wasm/mod.rs
+++ b/substrate/frame/contracts/src/wasm/mod.rs
@@ -1426,7 +1426,7 @@ mod tests {

 	#[test]
 	fn contract_ecdsa_to_eth_address() {
-		/// calls `seal_ecdsa_to_eth_address` for the contstant and ensures the result equals the
+		/// calls `seal_ecdsa_to_eth_address` for the constant and ensures the result equals the
 		/// expected one.
 		const CODE_ECDSA_TO_ETH_ADDRESS: &str = r#"
 (module
diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs
index 38f3a1c8f343..464f5b66f40e 100644
--- a/substrate/frame/contracts/src/wasm/runtime.rs
+++ b/substrate/frame/contracts/src/wasm/runtime.rs
@@ -28,12 +28,8 @@ use alloc::{boxed::Box, vec, vec::Vec};
 use alloc::fmt;
 use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen};
 use frame_support::{
-	dispatch::DispatchInfo,
-	ensure,
-	pallet_prelude::{DispatchResult, DispatchResultWithPostInfo},
-	parameter_types,
-	traits::Get,
-	weights::Weight,
+	dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types,
+	traits::Get, weights::Weight,
 };
 use pallet_contracts_proc_macro::define_env;
 use pallet_contracts_uapi::{CallFlags, ReturnFlags};
@@ -43,9 +39,6 @@ use sp_runtime::{
 	DispatchError, RuntimeDebug,
 };
 use wasmi::{core::HostError, errors::LinkerError, Linker, Memory, Store};
-use xcm::VersionedXcm;
-
-type CallOf<T> = <T as frame_system::Config>::RuntimeCall;

 /// The maximum nesting depth a contract can use when encoding types.
 const MAX_DECODE_NESTING: u32 = 256;
@@ -247,7 +240,7 @@ pub enum RuntimeCosts {
 	/// Weight of calling `account_reentrance_count`
 	AccountEntranceCount,
 	/// Weight of calling `instantiation_nonce`
-	InstantationNonce,
+	InstantiationNonce,
 	/// Weight of calling `lock_delegate_dependency`
 	LockDelegateDependency,
 	/// Weight of calling `unlock_delegate_dependency`
@@ -339,7 +332,7 @@ impl<T: Config> Token<T> for RuntimeCosts {
 			EcdsaToEthAddress => s.ecdsa_to_eth_address,
 			ReentrantCount => s.reentrance_count,
 			AccountEntranceCount => s.account_reentrance_count,
-			InstantationNonce => s.instantiation_nonce,
+			InstantiationNonce => s.instantiation_nonce,
 			LockDelegateDependency => s.lock_delegate_dependency,
 			UnlockDelegateDependency => s.unlock_delegate_dependency,
 		}
@@ -380,29 +373,6 @@ fn already_charged(_: u32) -> Option<u32> {
 	None
 }

-/// Ensure that the XCM program is executable, by checking that it does not contain any [`Transact`]
-/// instruction with a call that is not allowed by the CallFilter.
-fn ensure_executable<T: Config>(message: &VersionedXcm<CallOf<T>>) -> DispatchResult {
-	use frame_support::traits::Contains;
-	use xcm::prelude::{Transact, Xcm};
-
-	let mut message: Xcm<CallOf<T>> =
-		message.clone().try_into().map_err(|_| Error::<T>::XCMDecodeFailed)?;
-
-	message.iter_mut().try_for_each(|inst| -> DispatchResult {
-		let Transact { ref mut call, .. } = inst else { return Ok(()) };
-		let call = call.ensure_decoded().map_err(|_| Error::<T>::XCMDecodeFailed)?;
-
-		if !<T as Config>::CallFilter::contains(call) {
-			return Err(frame_system::Error::<T>::CallFiltered.into())
-		}
-
-		Ok(())
-	})?;
-
-	Ok(())
-}
-
 /// Can only be used for one call.
 pub struct Runtime<'a, E: Ext + 'a> {
 	ext: &'a mut E,
@@ -2114,16 +2084,13 @@ pub mod env {
 		msg_len: u32,
 	) -> Result<ReturnErrorCode, TrapReason> {
 		use frame_support::dispatch::DispatchInfo;
-		use xcm::VersionedXcm;
 		use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo};

 		ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?;
-		let message: VersionedXcm<CallOf<E::T>> =
-			ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?;
-		ensure_executable::<E::T>(&message)?;
+		let message = ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?;

 		let execute_weight =
-			<<E::T as Config>::Xcm as ExecuteController<_, _>>::WeightInfo::execute();
+			<<E::T as Config>::Xcm as ExecuteController<_, _>>::WeightInfo::execute_blob();
 		let weight = ctx.ext.gas_meter().gas_left().max(execute_weight);
 		let dispatch_info = DispatchInfo { weight, ..Default::default() };

@@ -2132,9 +2099,9 @@ pub mod env {
 			RuntimeCosts::CallXcmExecute,
 			|ctx| {
 				let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into();
-				let weight_used = <<E::T as Config>::Xcm>::execute(
+				let weight_used = <<E::T as Config>::Xcm>::execute_blob(
 					origin,
-					Box::new(message),
+					message,
 					weight.saturating_sub(execute_weight),
 				)?;

@@ -2154,19 +2121,18 @@ pub mod env {
 		msg_len: u32,
 		output_ptr: u32,
 	) -> Result<ReturnErrorCode, TrapReason> {
-		use xcm::{VersionedLocation, VersionedXcm};
+		use xcm::VersionedLocation;
 		use xcm_builder::{SendController, SendControllerWeightInfo};

 		ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?;
 		let dest: VersionedLocation = ctx.read_sandbox_memory_as(memory, dest_ptr)?;

-		let message: VersionedXcm<()> =
-			ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?;
-		let weight = <<E::T as Config>::Xcm as SendController<_>>::WeightInfo::send();
+		let message = ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?;
+		let weight = <<E::T as Config>::Xcm as SendController<_>>::WeightInfo::send_blob();
 		ctx.charge_gas(RuntimeCosts::CallRuntime(weight))?;
 		let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into();

-		match <<E::T as Config>::Xcm>::send(origin, dest.into(), message.into()) {
+		match <<E::T as Config>::Xcm>::send_blob(origin, dest.into(), message) {
 			Ok(message_id) => {
 				ctx.write_sandbox_memory(memory, output_ptr, &message_id.encode())?;
 				Ok(ReturnErrorCode::Success)
@@ -2300,7 +2266,7 @@ pub mod env {
 	/// Returns a nonce that is unique per contract instantiation.
 	/// See [`pallet_contracts_uapi::HostFn::instantiation_nonce`].
 	fn instantiation_nonce(ctx: _, _memory: _) -> Result<u64, TrapReason> {
-		ctx.charge_gas(RuntimeCosts::InstantationNonce)?;
+		ctx.charge_gas(RuntimeCosts::InstantiationNonce)?;
 		Ok(ctx.ext.nonce())
 	}

diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs
index 99bc4d8d3eb9..aa75ca8b29e3 100644
--- a/substrate/frame/contracts/src/weights.rs
+++ b/substrate/frame/contracts/src/weights.rs
@@ -18,14 +18,13 @@
 //! Autogenerated weights for `pallet_contracts`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! 
DATE: 2024-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-p5qp1txx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: // target/production/substrate-node -// target/production/substrate-node // benchmark // pallet // --steps=50 @@ -36,12 +35,8 @@ // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_contracts // --chain=dev -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_contracts -// --chain=dev // --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/contracts/src/weights.rs -// --output=./substrate/frame/contracts/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -147,8 +142,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_054_000 picoseconds. - Weight::from_parts(2_178_000, 1627) + // Minimum execution time: 2_130_000 picoseconds. + Weight::from_parts(2_247_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -158,10 +153,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 12_119_000 picoseconds. - Weight::from_parts(12_536_000, 442) - // Standard Error: 1_254 - .saturating_add(Weight::from_parts(1_161_885, 0).saturating_mul(k.into())) + // Minimum execution time: 12_748_000 picoseconds. + Weight::from_parts(13_001_000, 442) + // Standard Error: 1_206 + .saturating_add(Weight::from_parts(1_146_159, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -175,10 +170,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 8_146_000 picoseconds. - Weight::from_parts(8_560_745, 6149) + // Minimum execution time: 8_636_000 picoseconds. + Weight::from_parts(8_664_917, 6149) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_182, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_188, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -191,8 +186,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_183_000 picoseconds. - Weight::from_parts(16_825_000, 6450) + // Minimum execution time: 16_838_000 picoseconds. + Weight::from_parts(17_400_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -205,10 +200,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_645_000 picoseconds. 
- Weight::from_parts(604_365, 3635) - // Standard Error: 1_013 - .saturating_add(Weight::from_parts(1_100_277, 0).saturating_mul(k.into())) + // Minimum execution time: 3_876_000 picoseconds. + Weight::from_parts(2_654_935, 3635) + // Standard Error: 2_001 + .saturating_add(Weight::from_parts(1_258_085, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -218,6 +213,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) @@ -227,10 +224,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `328 + c * (1 ±0)` // Estimated: `6266 + c * (1 ±0)` - // Minimum execution time: 19_500_000 picoseconds. - Weight::from_parts(20_074_895, 6266) + // Minimum execution time: 21_038_000 picoseconds. + Weight::from_parts(20_890_548, 6266) // Standard Error: 1 - .saturating_add(Weight::from_parts(422, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(435, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -241,8 +238,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 12_530_000 picoseconds. - Weight::from_parts(13_362_000, 6380) + // Minimum execution time: 12_579_000 picoseconds. + Weight::from_parts(13_486_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -251,13 +248,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) fn v14_migration_step() -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 44_275_000 picoseconds. - Weight::from_parts(45_718_000, 6292) + // Minimum execution time: 47_123_000 picoseconds. + Weight::from_parts(48_284_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -269,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 55_095_000 picoseconds. - Weight::from_parts(58_009_000, 6534) + // Minimum execution time: 55_237_000 picoseconds. 
+ Weight::from_parts(57_996_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -280,8 +277,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_455_000 picoseconds. - Weight::from_parts(2_608_000, 1627) + // Minimum execution time: 2_766_000 picoseconds. + Weight::from_parts(2_807_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -293,8 +290,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 11_207_000 picoseconds. - Weight::from_parts(11_802_000, 3631) + // Minimum execution time: 12_243_000 picoseconds. + Weight::from_parts(12_890_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -304,8 +301,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_581_000 picoseconds. - Weight::from_parts(4_781_000, 3607) + // Minimum execution time: 4_951_000 picoseconds. + Weight::from_parts(5_232_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -316,8 +313,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 5_887_000 picoseconds. - Weight::from_parts(6_393_000, 3632) + // Minimum execution time: 6_530_000 picoseconds. + Weight::from_parts(6_726_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -328,13 +325,15 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_025_000 picoseconds. - Weight::from_parts(6_298_000, 3607) + // Minimum execution time: 6_227_000 picoseconds. + Weight::from_parts(6_708_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -352,20 +351,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `804 + c * (1 ±0)` // Estimated: `9217 + c * (1 ±0)` - // Minimum execution time: 294_976_000 picoseconds. - Weight::from_parts(277_235_948, 9217) - // Standard Error: 65 - .saturating_add(Weight::from_parts(34_445, 0).saturating_mul(c.into())) + // Minimum execution time: 309_889_000 picoseconds. 
+ Weight::from_parts(277_084_159, 9217) + // Standard Error: 71 + .saturating_add(Weight::from_parts(33_471, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:3 w:3) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) @@ -385,14 +386,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `8740` - // Minimum execution time: 3_864_558_000 picoseconds. - Weight::from_parts(421_676_889, 8740) - // Standard Error: 188 - .saturating_add(Weight::from_parts(103_788, 0).saturating_mul(c.into())) - // Standard Error: 22 - .saturating_add(Weight::from_parts(1_715, 0).saturating_mul(i.into())) - // Standard Error: 22 - .saturating_add(Weight::from_parts(1_774, 0).saturating_mul(s.into())) + // Minimum execution time: 3_909_680_000 picoseconds. + Weight::from_parts(446_471_160, 8740) + // Standard Error: 159 + .saturating_add(Weight::from_parts(101_085, 0).saturating_mul(c.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_598, 0).saturating_mul(i.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_879, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } @@ -402,6 +403,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -413,24 +416,26 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. /// The range of component `s` is `[0, 1048576]`. 
fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `563` // Estimated: `8982` - // Minimum execution time: 2_069_276_000 picoseconds. - Weight::from_parts(377_210_279, 8982) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_719, 0).saturating_mul(i.into())) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_728, 0).saturating_mul(s.into())) + // Minimum execution time: 1_968_545_000 picoseconds. + Weight::from_parts(420_048_028, 8982) + // Standard Error: 20 + .saturating_add(Weight::from_parts(1_685, 0).saturating_mul(i.into())) + // Standard Error: 20 + .saturating_add(Weight::from_parts(1_645, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -447,17 +452,19 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `9244` - // Minimum execution time: 199_037_000 picoseconds. - Weight::from_parts(213_931_000, 9244) + // Minimum execution time: 207_564_000 picoseconds. + Weight::from_parts(216_983_000, 9244) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:1 w:1) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) @@ -467,10 +474,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6085` - // Minimum execution time: 278_482_000 picoseconds. - Weight::from_parts(262_753_879, 6085) - // Standard Error: 112 - .saturating_add(Weight::from_parts(66_129, 0).saturating_mul(c.into())) + // Minimum execution time: 273_555_000 picoseconds. + Weight::from_parts(257_517_935, 6085) + // Standard Error: 148 + .saturating_add(Weight::from_parts(64_488, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -491,10 +498,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6085` - // Minimum execution time: 286_902_000 picoseconds. 
- Weight::from_parts(269_003_959, 6085) - // Standard Error: 123 - .saturating_add(Weight::from_parts(66_629, 0).saturating_mul(c.into())) + // Minimum execution time: 289_672_000 picoseconds. + Weight::from_parts(297_020_278, 6085) + // Standard Error: 86 + .saturating_add(Weight::from_parts(64_340, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -503,7 +510,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:1 w:1) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) @@ -512,8 +519,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 44_128_000 picoseconds. - Weight::from_parts(46_044_000, 3780) + // Minimum execution time: 45_930_000 picoseconds. + Weight::from_parts(47_288_000, 3780) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -529,8 +536,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 33_567_000 picoseconds. - Weight::from_parts(35_057_000, 8967) + // Minimum execution time: 35_421_000 picoseconds. + Weight::from_parts(36_909_000, 8967) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -538,6 +545,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -553,10 +562,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `869 + r * (6 ±0)` // Estimated: `9284 + r * (6 ±0)` - // Minimum execution time: 272_376_000 picoseconds. - Weight::from_parts(284_162_161, 9284) - // Standard Error: 501 - .saturating_add(Weight::from_parts(341_575, 0).saturating_mul(r.into())) + // Minimum execution time: 275_531_000 picoseconds. 
+ Weight::from_parts(292_269_656, 9284) + // Standard Error: 672 + .saturating_add(Weight::from_parts(339_728, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -565,6 +574,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -580,10 +591,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `925 + r * (209 ±0)` // Estimated: `9304 + r * (2684 ±0)` - // Minimum execution time: 256_990_000 picoseconds. - Weight::from_parts(107_167_044, 9304) - // Standard Error: 7_545 - .saturating_add(Weight::from_parts(3_628_112, 0).saturating_mul(r.into())) + // Minimum execution time: 275_829_000 picoseconds. + Weight::from_parts(124_543_289, 9304) + // Standard Error: 6_085 + .saturating_add(Weight::from_parts(3_702_964, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -593,6 +604,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -608,10 +621,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `924 + r * (213 ±0)` // Estimated: `9308 + r * (2688 ±0)` - // Minimum execution time: 272_889_000 picoseconds. - Weight::from_parts(110_341_799, 9308) - // Standard Error: 7_881 - .saturating_add(Weight::from_parts(4_427_043, 0).saturating_mul(r.into())) + // Minimum execution time: 276_666_000 picoseconds. 
+ Weight::from_parts(96_951_288, 9308) + // Standard Error: 8_876 + .saturating_add(Weight::from_parts(4_604_699, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -621,6 +634,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -636,10 +651,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `876 + r * (6 ±0)` // Estimated: `9293 + r * (6 ±0)` - // Minimum execution time: 275_027_000 picoseconds. - Weight::from_parts(283_302_223, 9293) - // Standard Error: 1_179 - .saturating_add(Weight::from_parts(436_023, 0).saturating_mul(r.into())) + // Minimum execution time: 271_301_000 picoseconds. + Weight::from_parts(284_126_054, 9293) + // Standard Error: 886 + .saturating_add(Weight::from_parts(437_127, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -648,6 +663,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -663,16 +680,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `866 + r * (3 ±0)` // Estimated: `9282 + r * (3 ±0)` - // Minimum execution time: 270_137_000 picoseconds. - Weight::from_parts(282_756_362, 9282) - // Standard Error: 384 - .saturating_add(Weight::from_parts(171_660, 0).saturating_mul(r.into())) + // Minimum execution time: 274_778_000 picoseconds. 
+ Weight::from_parts(289_355_269, 9282) + // Standard Error: 382 + .saturating_add(Weight::from_parts(175_342, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -688,10 +707,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `756 + r * (3 ±0)` // Estimated: `9171 + r * (3 ±0)` - // Minimum execution time: 255_131_000 picoseconds. - Weight::from_parts(269_207_006, 9171) - // Standard Error: 542 - .saturating_add(Weight::from_parts(161_597, 0).saturating_mul(r.into())) + // Minimum execution time: 257_310_000 picoseconds. + Weight::from_parts(276_410_847, 9171) + // Standard Error: 733 + .saturating_add(Weight::from_parts(160_094, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -700,6 +719,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -715,10 +736,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `870 + r * (6 ±0)` // Estimated: `9285 + r * (6 ±0)` - // Minimum execution time: 272_172_000 picoseconds. - Weight::from_parts(283_747_134, 9285) - // Standard Error: 885 - .saturating_add(Weight::from_parts(343_968, 0).saturating_mul(r.into())) + // Minimum execution time: 278_305_000 picoseconds. 
+ Weight::from_parts(282_372_935, 9285) + // Standard Error: 1_154 + .saturating_add(Weight::from_parts(343_382, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -727,6 +748,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -742,10 +765,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `866 + r * (6 ±0)` // Estimated: `9284 + r * (6 ±0)` - // Minimum execution time: 263_220_000 picoseconds. - Weight::from_parts(277_062_382, 9284) - // Standard Error: 1_783 - .saturating_add(Weight::from_parts(399_631, 0).saturating_mul(r.into())) + // Minimum execution time: 280_349_000 picoseconds. + Weight::from_parts(294_864_875, 9284) + // Standard Error: 1_010 + .saturating_add(Weight::from_parts(368_740, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -754,6 +777,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:2 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -769,10 +794,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1010 + r * (6 ±0)` // Estimated: `9409 + r * (6 ±0)` - // Minimum execution time: 253_218_000 picoseconds. - Weight::from_parts(282_251_452, 9409) - // Standard Error: 2_367 - .saturating_add(Weight::from_parts(1_604_060, 0).saturating_mul(r.into())) + // Minimum execution time: 274_578_000 picoseconds. 
+ Weight::from_parts(313_285_034, 9409) + // Standard Error: 2_800 + .saturating_add(Weight::from_parts(1_653_468, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -781,6 +806,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -796,10 +823,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `880 + r * (6 ±0)` // Estimated: `9301 + r * (6 ±0)` - // Minimum execution time: 271_797_000 picoseconds. - Weight::from_parts(288_594_393, 9301) - // Standard Error: 846 - .saturating_add(Weight::from_parts(335_063, 0).saturating_mul(r.into())) + // Minimum execution time: 287_127_000 picoseconds. + Weight::from_parts(290_489_909, 9301) + // Standard Error: 864 + .saturating_add(Weight::from_parts(332_977, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -808,6 +835,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -823,10 +852,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `878 + r * (6 ±0)` // Estimated: `9294 + r * (6 ±0)` - // Minimum execution time: 254_997_000 picoseconds. - Weight::from_parts(284_331_894, 9294) - // Standard Error: 885 - .saturating_add(Weight::from_parts(337_073, 0).saturating_mul(r.into())) + // Minimum execution time: 273_291_000 picoseconds. 
+ Weight::from_parts(293_650_716, 9294) + // Standard Error: 725 + .saturating_add(Weight::from_parts(323_281, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -835,6 +864,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -850,10 +881,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `875 + r * (6 ±0)` // Estimated: `9297 + r * (6 ±0)` - // Minimum execution time: 273_845_000 picoseconds. - Weight::from_parts(285_291_242, 9297) - // Standard Error: 690 - .saturating_add(Weight::from_parts(332_783, 0).saturating_mul(r.into())) + // Minimum execution time: 282_061_000 picoseconds. + Weight::from_parts(291_729_751, 9297) + // Standard Error: 929 + .saturating_add(Weight::from_parts(324_683, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -862,6 +893,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -877,10 +910,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `866 + r * (6 ±0)` // Estimated: `9282 + r * (6 ±0)` - // Minimum execution time: 268_302_000 picoseconds. - Weight::from_parts(280_463_257, 9282) - // Standard Error: 1_035 - .saturating_add(Weight::from_parts(345_811, 0).saturating_mul(r.into())) + // Minimum execution time: 264_505_000 picoseconds. 
+ Weight::from_parts(293_440_286, 9282) + // Standard Error: 704 + .saturating_add(Weight::from_parts(329_851, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -889,6 +922,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -906,10 +941,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `940 + r * (14 ±0)` // Estimated: `9350 + r * (14 ±0)` - // Minimum execution time: 263_433_000 picoseconds. - Weight::from_parts(290_098_370, 9350) - // Standard Error: 1_905 - .saturating_add(Weight::from_parts(805_299, 0).saturating_mul(r.into())) + // Minimum execution time: 277_208_000 picoseconds. + Weight::from_parts(304_294_691, 9350) + // Standard Error: 1_083 + .saturating_add(Weight::from_parts(824_245, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 14).saturating_mul(r.into())) @@ -918,6 +953,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -933,10 +970,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `868 + r * (6 ±0)` // Estimated: `9285 + r * (6 ±0)` - // Minimum execution time: 273_588_000 picoseconds. - Weight::from_parts(287_133_565, 9285) - // Standard Error: 799 - .saturating_add(Weight::from_parts(254_114, 0).saturating_mul(r.into())) + // Minimum execution time: 278_293_000 picoseconds. 
+ Weight::from_parts(289_743_005, 9285) + // Standard Error: 672 + .saturating_add(Weight::from_parts(267_553, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -945,6 +982,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -960,10 +999,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `872` // Estimated: `9287` - // Minimum execution time: 257_843_000 picoseconds. - Weight::from_parts(228_177_495, 9287) - // Standard Error: 24 - .saturating_add(Weight::from_parts(985, 0).saturating_mul(n.into())) + // Minimum execution time: 279_495_000 picoseconds. + Weight::from_parts(232_736_994, 9287) + // Standard Error: 23 + .saturating_add(Weight::from_parts(1_008, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -971,6 +1010,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -986,10 +1027,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `856 + r * (45 ±0)` // Estimated: `9271 + r * (45 ±0)` - // Minimum execution time: 248_332_000 picoseconds. - Weight::from_parts(274_998_906, 9271) - // Standard Error: 949_561 - .saturating_add(Weight::from_parts(5_570_693, 0).saturating_mul(r.into())) + // Minimum execution time: 257_920_000 picoseconds. 
+ Weight::from_parts(282_276_265, 9271) + // Standard Error: 948_490 + .saturating_add(Weight::from_parts(2_408_134, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) @@ -998,6 +1039,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1013,10 +1056,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `866` // Estimated: `9288` - // Minimum execution time: 272_055_000 picoseconds. - Weight::from_parts(282_280_090, 9288) - // Standard Error: 1 - .saturating_add(Weight::from_parts(330, 0).saturating_mul(n.into())) + // Minimum execution time: 278_149_000 picoseconds. + Weight::from_parts(287_020_337, 9288) + // Standard Error: 0 + .saturating_add(Weight::from_parts(321, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1024,9 +1067,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:1 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) + /// Storage: `Contracts::CodeInfoOf` (r:33 w:33) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) @@ -1037,28 +1082,30 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `Contracts::DeletionQueue` (r:0 w:1) /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2902 + r * (529 ±0)` - // Estimated: `11317 + r * (5479 ±0)` - // Minimum execution time: 274_842_000 picoseconds. 
- Weight::from_parts(299_824_497, 11317) - // Standard Error: 902_686 - .saturating_add(Weight::from_parts(106_562_902, 0).saturating_mul(r.into())) + // Measured: `4805 + r * (2121 ±0)` + // Estimated: `13220 + r * (81321 ±0)` + // Minimum execution time: 307_763_000 picoseconds. + Weight::from_parts(323_648_618, 13220) + // Standard Error: 879_890 + .saturating_add(Weight::from_parts(249_045_481, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().reads((36_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(T::DbWeight::get().writes((10_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 5266).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().writes((41_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 81321).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1076,10 +1123,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `947 + r * (10 ±0)` // Estimated: `9363 + r * (10 ±0)` - // Minimum execution time: 255_393_000 picoseconds. - Weight::from_parts(309_814_338, 9363) - // Standard Error: 2_978 - .saturating_add(Weight::from_parts(1_203_507, 0).saturating_mul(r.into())) + // Minimum execution time: 278_400_000 picoseconds. + Weight::from_parts(293_743_000, 9363) + // Standard Error: 1_686 + .saturating_add(Weight::from_parts(1_288_603, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) @@ -1088,6 +1135,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1103,10 +1152,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `866 + r * (10 ±0)` // Estimated: `9283 + r * (10 ±0)` - // Minimum execution time: 250_378_000 picoseconds. - Weight::from_parts(287_003_144, 9283) - // Standard Error: 2_726 - .saturating_add(Weight::from_parts(1_850_967, 0).saturating_mul(r.into())) + // Minimum execution time: 272_110_000 picoseconds. 
+ Weight::from_parts(295_620_726, 9283) + // Standard Error: 5_481 + .saturating_add(Weight::from_parts(2_031_955, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) @@ -1115,6 +1164,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1131,12 +1182,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `883 + t * (32 ±0)` // Estimated: `9303 + t * (2508 ±0)` - // Minimum execution time: 266_787_000 picoseconds. - Weight::from_parts(284_368_661, 9303) - // Standard Error: 121_315 - .saturating_add(Weight::from_parts(2_690_211, 0).saturating_mul(t.into())) - // Standard Error: 33 - .saturating_add(Weight::from_parts(789, 0).saturating_mul(n.into())) + // Minimum execution time: 277_409_000 picoseconds. + Weight::from_parts(293_838_037, 9303) + // Standard Error: 87_977 + .saturating_add(Weight::from_parts(2_911_340, 0).saturating_mul(t.into())) + // Standard Error: 24 + .saturating_add(Weight::from_parts(531, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1147,6 +1198,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1162,10 +1215,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `865 + r * (7 ±0)` // Estimated: `9285 + r * (7 ±0)` - // Minimum execution time: 166_804_000 picoseconds. - Weight::from_parts(175_118_291, 9285) - // Standard Error: 398 - .saturating_add(Weight::from_parts(220_961, 0).saturating_mul(r.into())) + // Minimum execution time: 172_634_000 picoseconds. 
+ Weight::from_parts(183_322_840, 9285) + // Standard Error: 384 + .saturating_add(Weight::from_parts(226_007, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) @@ -1174,6 +1227,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1189,10 +1244,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `125816` // Estimated: `131758` - // Minimum execution time: 398_436_000 picoseconds. - Weight::from_parts(385_003_285, 131758) + // Minimum execution time: 425_713_000 picoseconds. + Weight::from_parts(394_260_924, 131758) // Standard Error: 12 - .saturating_add(Weight::from_parts(1_038, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_032, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1203,10 +1258,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `927 + r * (292 ±0)` // Estimated: `929 + r * (293 ±0)` - // Minimum execution time: 274_099_000 picoseconds. - Weight::from_parts(174_282_817, 929) - // Standard Error: 10_547 - .saturating_add(Weight::from_parts(6_262_173, 0).saturating_mul(r.into())) + // Minimum execution time: 268_128_000 picoseconds. + Weight::from_parts(186_787_113, 929) + // Standard Error: 10_796 + .saturating_add(Weight::from_parts(6_454_780, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1220,10 +1275,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1450` // Estimated: `1433` - // Minimum execution time: 274_061_000 picoseconds. - Weight::from_parts(334_755_333, 1433) - // Standard Error: 66 - .saturating_add(Weight::from_parts(771, 0).saturating_mul(n.into())) + // Minimum execution time: 286_565_000 picoseconds. + Weight::from_parts(349_504_932, 1433) + // Standard Error: 70 + .saturating_add(Weight::from_parts(530, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } @@ -1234,8 +1289,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1256 + n * (1 ±0)` // Estimated: `1256 + n * (1 ±0)` - // Minimum execution time: 272_657_000 picoseconds. - Weight::from_parts(299_061_594, 1256) + // Minimum execution time: 282_478_000 picoseconds. 
+ Weight::from_parts(303_448_260, 1256) + // Standard Error: 34 + .saturating_add(Weight::from_parts(712, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1247,10 +1304,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `924 + r * (288 ±0)` // Estimated: `930 + r * (289 ±0)` - // Minimum execution time: 270_524_000 picoseconds. - Weight::from_parts(177_959_667, 930) - // Standard Error: 10_397 - .saturating_add(Weight::from_parts(6_270_181, 0).saturating_mul(r.into())) + // Minimum execution time: 271_793_000 picoseconds. + Weight::from_parts(179_158_648, 930) + // Standard Error: 11_868 + .saturating_add(Weight::from_parts(6_397_986, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1264,8 +1321,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1252 + n * (1 ±0)` // Estimated: `1252 + n * (1 ±0)` - // Minimum execution time: 270_757_000 picoseconds. - Weight::from_parts(298_627_896, 1252) + // Minimum execution time: 273_945_000 picoseconds. + Weight::from_parts(299_855_996, 1252) + // Standard Error: 31 + .saturating_add(Weight::from_parts(309, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1277,10 +1336,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `924 + r * (296 ±0)` // Estimated: `926 + r * (297 ±0)` - // Minimum execution time: 268_082_000 picoseconds. - Weight::from_parts(202_583_730, 926) - // Standard Error: 9_029 - .saturating_add(Weight::from_parts(5_028_517, 0).saturating_mul(r.into())) + // Minimum execution time: 275_285_000 picoseconds. + Weight::from_parts(207_735_572, 926) + // Standard Error: 9_736 + .saturating_add(Weight::from_parts(5_162_837, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1293,10 +1352,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1268 + n * (1 ±0)` // Estimated: `1268 + n * (1 ±0)` - // Minimum execution time: 274_100_000 picoseconds. - Weight::from_parts(296_981_515, 1268) + // Minimum execution time: 278_929_000 picoseconds. + Weight::from_parts(302_251_674, 1268) // Standard Error: 34 - .saturating_add(Weight::from_parts(578, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(583, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1308,10 +1367,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `935 + r * (288 ±0)` // Estimated: `932 + r * (289 ±0)` - // Minimum execution time: 269_697_000 picoseconds. - Weight::from_parts(198_346_516, 932) - // Standard Error: 8_623 - .saturating_add(Weight::from_parts(4_964_116, 0).saturating_mul(r.into())) + // Minimum execution time: 258_476_000 picoseconds. 
+ Weight::from_parts(209_578_051, 932) + // Standard Error: 8_255 + .saturating_add(Weight::from_parts(4_942_572, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1324,10 +1383,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1255 + n * (1 ±0)` // Estimated: `1255 + n * (1 ±0)` - // Minimum execution time: 264_210_000 picoseconds. - Weight::from_parts(291_387_652, 1255) - // Standard Error: 36 - .saturating_add(Weight::from_parts(524, 0).saturating_mul(n.into())) + // Minimum execution time: 273_089_000 picoseconds. + Weight::from_parts(302_452_604, 1255) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1339,10 +1396,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `917 + r * (296 ±0)` // Estimated: `922 + r * (297 ±0)` - // Minimum execution time: 267_219_000 picoseconds. - Weight::from_parts(175_866_982, 922) - // Standard Error: 11_275 - .saturating_add(Weight::from_parts(6_424_755, 0).saturating_mul(r.into())) + // Minimum execution time: 274_301_000 picoseconds. + Weight::from_parts(172_245_469, 922) + // Standard Error: 11_306 + .saturating_add(Weight::from_parts(6_526_825, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1356,10 +1413,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1269 + n * (1 ±0)` // Estimated: `1269 + n * (1 ±0)` - // Minimum execution time: 274_634_000 picoseconds. - Weight::from_parts(294_272_009, 1269) + // Minimum execution time: 280_399_000 picoseconds. + Weight::from_parts(305_970_974, 1269) // Standard Error: 36 - .saturating_add(Weight::from_parts(1_219, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(568, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1368,6 +1425,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1602 w:1601) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1383,10 +1442,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1418 + r * (45 ±0)` // Estimated: `9785 + r * (2520 ±0)` - // Minimum execution time: 266_833_000 picoseconds. - Weight::from_parts(186_049_741, 9785) - // Standard Error: 40_311 - .saturating_add(Weight::from_parts(31_365_585, 0).saturating_mul(r.into())) + // Minimum execution time: 258_452_000 picoseconds. 
+ Weight::from_parts(276_401_000, 9785) + // Standard Error: 65_648 + .saturating_add(Weight::from_parts(33_890_852, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -1397,6 +1456,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) @@ -1412,10 +1473,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1263 + r * (245 ±0)` // Estimated: `9635 + r * (2721 ±0)` - // Minimum execution time: 272_140_000 picoseconds. - Weight::from_parts(275_009_000, 9635) - // Standard Error: 99_187 - .saturating_add(Weight::from_parts(240_702_479, 0).saturating_mul(r.into())) + // Minimum execution time: 281_394_000 picoseconds. + Weight::from_parts(286_475_000, 9635) + // Standard Error: 156_302 + .saturating_add(Weight::from_parts(250_370_283, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -1426,6 +1487,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:736 w:0) @@ -1441,10 +1504,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + r * (576 ±0)` // Estimated: `9290 + r * (2637 ±10)` - // Minimum execution time: 263_664_000 picoseconds. - Weight::from_parts(277_240_000, 9290) - // Standard Error: 169_147 - .saturating_add(Weight::from_parts(240_084_929, 0).saturating_mul(r.into())) + // Minimum execution time: 278_193_000 picoseconds. 
+ Weight::from_parts(280_814_000, 9290) + // Standard Error: 164_401 + .saturating_add(Weight::from_parts(251_272_834, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1455,6 +1518,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) @@ -1471,12 +1536,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1310 + t * (277 ±0)` // Estimated: `12200 + t * (5227 ±0)` - // Minimum execution time: 464_644_000 picoseconds. - Weight::from_parts(40_377_313, 12200) - // Standard Error: 12_060_226 - .saturating_add(Weight::from_parts(380_635_982, 0).saturating_mul(t.into())) + // Minimum execution time: 476_812_000 picoseconds. + Weight::from_parts(70_715_306, 12200) + // Standard Error: 12_232_109 + .saturating_add(Weight::from_parts(374_277_042, 0).saturating_mul(t.into())) // Standard Error: 17 - .saturating_add(Weight::from_parts(1_014, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_022, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(16_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -1487,6 +1552,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:802 w:802) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:801 w:800) @@ -1500,16 +1567,16 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:803 w:803) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:800 w:800) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `r` is `[1, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1281 + r * (255 ±0)` // Estimated: `9623 + r * (2731 ±0)` - // Minimum execution time: 641_845_000 picoseconds. 
- Weight::from_parts(649_908_000, 9623) - // Standard Error: 278_514 - .saturating_add(Weight::from_parts(361_164_598, 0).saturating_mul(r.into())) + // Minimum execution time: 656_480_000 picoseconds. + Weight::from_parts(668_579_000, 9623) + // Standard Error: 365_458 + .saturating_add(Weight::from_parts(379_238_223, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(7_u64)) @@ -1520,6 +1587,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:1) @@ -1533,7 +1602,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 983040]`. /// The range of component `s` is `[0, 983040]`. @@ -1541,12 +1610,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1306 + t * (104 ±0)` // Estimated: `12214 + t * (2549 ±1)` - // Minimum execution time: 2_111_632_000 picoseconds. - Weight::from_parts(1_213_125_745, 12214) - // Standard Error: 22 - .saturating_add(Weight::from_parts(1_055, 0).saturating_mul(i.into())) - // Standard Error: 22 - .saturating_add(Weight::from_parts(1_192, 0).saturating_mul(s.into())) + // Minimum execution time: 2_148_964_000 picoseconds. 
+ Weight::from_parts(1_557_685_999, 12214) + // Standard Error: 36 + .saturating_add(Weight::from_parts(864, 0).saturating_mul(i.into())) + // Standard Error: 36 + .saturating_add(Weight::from_parts(1_092, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(19_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(11_u64)) @@ -1557,6 +1626,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1572,10 +1643,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `865 + r * (8 ±0)` // Estimated: `9279 + r * (8 ±0)` - // Minimum execution time: 264_227_000 picoseconds. - Weight::from_parts(281_015_995, 9279) - // Standard Error: 710 - .saturating_add(Weight::from_parts(365_734, 0).saturating_mul(r.into())) + // Minimum execution time: 279_377_000 picoseconds. + Weight::from_parts(287_951_287, 9279) + // Standard Error: 659 + .saturating_add(Weight::from_parts(376_476, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -1584,6 +1655,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1599,10 +1672,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `873` // Estimated: `9286` - // Minimum execution time: 270_709_000 picoseconds. - Weight::from_parts(268_416_051, 9286) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_080, 0).saturating_mul(n.into())) + // Minimum execution time: 276_151_000 picoseconds. 
+ Weight::from_parts(267_656_959, 9286) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_108, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1610,6 +1683,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1625,10 +1700,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `867 + r * (8 ±0)` // Estimated: `9284 + r * (8 ±0)` - // Minimum execution time: 267_283_000 picoseconds. - Weight::from_parts(280_706_659, 9284) - // Standard Error: 540 - .saturating_add(Weight::from_parts(790_312, 0).saturating_mul(r.into())) + // Minimum execution time: 275_247_000 picoseconds. + Weight::from_parts(286_675_317, 9284) + // Standard Error: 601 + .saturating_add(Weight::from_parts(788_160, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -1637,6 +1712,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1652,10 +1729,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `875` // Estimated: `9292` - // Minimum execution time: 262_010_000 picoseconds. - Weight::from_parts(273_278_744, 9292) + // Minimum execution time: 281_585_000 picoseconds. 
+ Weight::from_parts(287_637_844, 9292) // Standard Error: 1 - .saturating_add(Weight::from_parts(3_362, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_351, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1663,6 +1740,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1678,10 +1757,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `867 + r * (8 ±0)` // Estimated: `9286 + r * (8 ±0)` - // Minimum execution time: 268_698_000 picoseconds. - Weight::from_parts(282_890_578, 9286) - // Standard Error: 622 - .saturating_add(Weight::from_parts(441_131, 0).saturating_mul(r.into())) + // Minimum execution time: 273_678_000 picoseconds. + Weight::from_parts(289_879_306, 9286) + // Standard Error: 607 + .saturating_add(Weight::from_parts(439_482, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -1690,6 +1769,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1705,10 +1786,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `875` // Estimated: `9291` - // Minimum execution time: 252_846_000 picoseconds. - Weight::from_parts(267_657_561, 9291) + // Minimum execution time: 275_126_000 picoseconds. 
+ Weight::from_parts(276_684_594, 9291) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_208, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1716,6 +1797,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1731,10 +1814,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `867 + r * (8 ±0)` // Estimated: `9283 + r * (8 ±0)` - // Minimum execution time: 266_899_000 picoseconds. - Weight::from_parts(276_862_067, 9283) - // Standard Error: 1_249 - .saturating_add(Weight::from_parts(443_970, 0).saturating_mul(r.into())) + // Minimum execution time: 273_229_000 picoseconds. + Weight::from_parts(287_793_841, 9283) + // Standard Error: 451 + .saturating_add(Weight::from_parts(447_922, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -1743,6 +1826,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1758,10 +1843,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `875` // Estimated: `9289` - // Minimum execution time: 265_413_000 picoseconds. - Weight::from_parts(265_600_840, 9289) + // Minimum execution time: 277_843_000 picoseconds. 
+ Weight::from_parts(279_900_099, 9289) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_203, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_204, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1769,6 +1854,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1784,10 +1871,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1000 + n * (1 ±0)` // Estimated: `9412 + n * (1 ±0)` - // Minimum execution time: 328_341_000 picoseconds. - Weight::from_parts(341_038_581, 9412) - // Standard Error: 10 - .saturating_add(Weight::from_parts(5_863, 0).saturating_mul(n.into())) + // Minimum execution time: 331_840_000 picoseconds. + Weight::from_parts(338_767_191, 9412) + // Standard Error: 11 + .saturating_add(Weight::from_parts(5_971, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1796,6 +1883,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1811,10 +1900,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `808 + r * (112 ±0)` // Estimated: `9226 + r * (112 ±0)` - // Minimum execution time: 272_730_000 picoseconds. - Weight::from_parts(339_349_232, 9226) - // Standard Error: 13_004 - .saturating_add(Weight::from_parts(41_649_026, 0).saturating_mul(r.into())) + // Minimum execution time: 277_912_000 picoseconds. 
+ Weight::from_parts(344_538_960, 9226) + // Standard Error: 13_422 + .saturating_add(Weight::from_parts(41_592_887, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) @@ -1823,6 +1912,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1838,10 +1929,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `910 + r * (76 ±0)` // Estimated: `9279 + r * (77 ±0)` - // Minimum execution time: 273_892_000 picoseconds. - Weight::from_parts(352_853_960, 9279) - // Standard Error: 16_745 - .saturating_add(Weight::from_parts(45_853_294, 0).saturating_mul(r.into())) + // Minimum execution time: 280_383_000 picoseconds. + Weight::from_parts(348_542_377, 9279) + // Standard Error: 13_985 + .saturating_add(Weight::from_parts(45_983_827, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) @@ -1850,6 +1941,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1865,10 +1958,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `880 + r * (42 ±0)` // Estimated: `9294 + r * (42 ±0)` - // Minimum execution time: 268_431_000 picoseconds. - Weight::from_parts(319_727_796, 9294) - // Standard Error: 10_276 - .saturating_add(Weight::from_parts(11_928_882, 0).saturating_mul(r.into())) + // Minimum execution time: 277_764_000 picoseconds. 
+ Weight::from_parts(320_288_180, 9294) + // Standard Error: 10_140 + .saturating_add(Weight::from_parts(12_046_137, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) @@ -1877,6 +1970,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) @@ -1892,10 +1987,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + r * (965 ±0)` // Estimated: `9285 + r * (3090 ±7)` - // Minimum execution time: 275_482_000 picoseconds. - Weight::from_parts(279_155_000, 9285) - // Standard Error: 65_388 - .saturating_add(Weight::from_parts(26_634_187, 0).saturating_mul(r.into())) + // Minimum execution time: 271_356_000 picoseconds. + Weight::from_parts(282_924_000, 9285) + // Standard Error: 60_493 + .saturating_add(Weight::from_parts(28_319_267, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1906,6 +2001,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) @@ -1921,20 +2018,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `937 + r * (131 ±0)` // Estimated: `9346 + r * (2607 ±0)` - // Minimum execution time: 270_001_000 picoseconds. - Weight::from_parts(286_792_689, 9346) - // Standard Error: 21_074 - .saturating_add(Weight::from_parts(6_465_885, 0).saturating_mul(r.into())) + // Minimum execution time: 269_698_000 picoseconds. 
+ Weight::from_parts(294_325_127, 9346) + // Standard Error: 22_352 + .saturating_add(Weight::from_parts(6_744_117, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2606).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2607).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) @@ -1948,12 +2047,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 32]`. fn unlock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `969 + r * (183 ±0)` + // Measured: `972 + r * (184 ±0)` // Estimated: `129453 + r * (2568 ±0)` - // Minimum execution time: 270_652_000 picoseconds. - Weight::from_parts(293_369_452, 129453) - // Standard Error: 24_321 - .saturating_add(Weight::from_parts(5_575_600, 0).saturating_mul(r.into())) + // Minimum execution time: 261_226_000 picoseconds. + Weight::from_parts(294_299_527, 129453) + // Standard Error: 27_898 + .saturating_add(Weight::from_parts(6_031_601, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1964,6 +2063,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1979,10 +2080,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `861 + r * (3 ±0)` // Estimated: `9282 + r * (3 ±0)` - // Minimum execution time: 267_749_000 picoseconds. - Weight::from_parts(280_373_341, 9282) - // Standard Error: 464 - .saturating_add(Weight::from_parts(170_398, 0).saturating_mul(r.into())) + // Minimum execution time: 270_729_000 picoseconds. 
+ Weight::from_parts(289_622_807, 9282) + // Standard Error: 394 + .saturating_add(Weight::from_parts(167_010, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -1991,6 +2092,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2006,10 +2109,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2112 + r * (39 ±0)` // Estimated: `10377 + r * (40 ±0)` - // Minimum execution time: 270_260_000 picoseconds. - Weight::from_parts(337_969_172, 10377) - // Standard Error: 1_557 - .saturating_add(Weight::from_parts(305_735, 0).saturating_mul(r.into())) + // Minimum execution time: 272_228_000 picoseconds. + Weight::from_parts(351_059_276, 10377) + // Standard Error: 1_761 + .saturating_add(Weight::from_parts(312_269, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) @@ -2018,6 +2121,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2035,10 +2140,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `864 + r * (3 ±0)` // Estimated: `9279 + r * (3 ±0)` - // Minimum execution time: 265_607_000 picoseconds. - Weight::from_parts(283_396_630, 9279) - // Standard Error: 449 - .saturating_add(Weight::from_parts(149_018, 0).saturating_mul(r.into())) + // Minimum execution time: 272_497_000 picoseconds. + Weight::from_parts(288_213_060, 9279) + // Standard Error: 469 + .saturating_add(Weight::from_parts(155_530, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -2048,10 +2153,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_813_000 picoseconds. - Weight::from_parts(1_264_945, 0) - // Standard Error: 19 - .saturating_add(Weight::from_parts(15_098, 0).saturating_mul(r.into())) + // Minimum execution time: 1_990_000 picoseconds. 
+ Weight::from_parts(1_778_221, 0) + // Standard Error: 26 + .saturating_add(Weight::from_parts(14_888, 0).saturating_mul(r.into())) } } @@ -2063,8 +2168,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_054_000 picoseconds. - Weight::from_parts(2_178_000, 1627) + // Minimum execution time: 2_130_000 picoseconds. + Weight::from_parts(2_247_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -2074,10 +2179,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 12_119_000 picoseconds. - Weight::from_parts(12_536_000, 442) - // Standard Error: 1_254 - .saturating_add(Weight::from_parts(1_161_885, 0).saturating_mul(k.into())) + // Minimum execution time: 12_748_000 picoseconds. + Weight::from_parts(13_001_000, 442) + // Standard Error: 1_206 + .saturating_add(Weight::from_parts(1_146_159, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -2091,10 +2196,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 8_146_000 picoseconds. - Weight::from_parts(8_560_745, 6149) + // Minimum execution time: 8_636_000 picoseconds. + Weight::from_parts(8_664_917, 6149) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_182, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_188, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -2107,8 +2212,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_183_000 picoseconds. - Weight::from_parts(16_825_000, 6450) + // Minimum execution time: 16_838_000 picoseconds. + Weight::from_parts(17_400_000, 6450) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2121,10 +2226,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_645_000 picoseconds. - Weight::from_parts(604_365, 3635) - // Standard Error: 1_013 - .saturating_add(Weight::from_parts(1_100_277, 0).saturating_mul(k.into())) + // Minimum execution time: 3_876_000 picoseconds. 
+ Weight::from_parts(2_654_935, 3635) + // Standard Error: 2_001 + .saturating_add(Weight::from_parts(1_258_085, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -2134,6 +2239,8 @@ impl WeightInfo for () { /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) @@ -2143,10 +2250,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `328 + c * (1 ±0)` // Estimated: `6266 + c * (1 ±0)` - // Minimum execution time: 19_500_000 picoseconds. - Weight::from_parts(20_074_895, 6266) + // Minimum execution time: 21_038_000 picoseconds. + Weight::from_parts(20_890_548, 6266) // Standard Error: 1 - .saturating_add(Weight::from_parts(422, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(435, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -2157,8 +2264,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 12_530_000 picoseconds. - Weight::from_parts(13_362_000, 6380) + // Minimum execution time: 12_579_000 picoseconds. + Weight::from_parts(13_486_000, 6380) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2167,13 +2274,13 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) fn v14_migration_step() -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 44_275_000 picoseconds. - Weight::from_parts(45_718_000, 6292) + // Minimum execution time: 47_123_000 picoseconds. + Weight::from_parts(48_284_000, 6292) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2185,8 +2292,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 55_095_000 picoseconds. - Weight::from_parts(58_009_000, 6534) + // Minimum execution time: 55_237_000 picoseconds. + Weight::from_parts(57_996_000, 6534) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -2196,8 +2303,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_455_000 picoseconds. 
- Weight::from_parts(2_608_000, 1627) + // Minimum execution time: 2_766_000 picoseconds. + Weight::from_parts(2_807_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2209,8 +2316,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 11_207_000 picoseconds. - Weight::from_parts(11_802_000, 3631) + // Minimum execution time: 12_243_000 picoseconds. + Weight::from_parts(12_890_000, 3631) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -2220,8 +2327,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_581_000 picoseconds. - Weight::from_parts(4_781_000, 3607) + // Minimum execution time: 4_951_000 picoseconds. + Weight::from_parts(5_232_000, 3607) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -2232,8 +2339,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 5_887_000 picoseconds. - Weight::from_parts(6_393_000, 3632) + // Minimum execution time: 6_530_000 picoseconds. + Weight::from_parts(6_726_000, 3632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -2244,13 +2351,15 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_025_000 picoseconds. - Weight::from_parts(6_298_000, 3607) + // Minimum execution time: 6_227_000 picoseconds. + Weight::from_parts(6_708_000, 3607) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2268,20 +2377,22 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `804 + c * (1 ±0)` // Estimated: `9217 + c * (1 ±0)` - // Minimum execution time: 294_976_000 picoseconds. - Weight::from_parts(277_235_948, 9217) - // Standard Error: 65 - .saturating_add(Weight::from_parts(34_445, 0).saturating_mul(c.into())) + // Minimum execution time: 309_889_000 picoseconds. 
+ Weight::from_parts(277_084_159, 9217) + // Standard Error: 71 + .saturating_add(Weight::from_parts(33_471, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:3 w:3) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) @@ -2301,14 +2412,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `8740` - // Minimum execution time: 3_864_558_000 picoseconds. - Weight::from_parts(421_676_889, 8740) - // Standard Error: 188 - .saturating_add(Weight::from_parts(103_788, 0).saturating_mul(c.into())) - // Standard Error: 22 - .saturating_add(Weight::from_parts(1_715, 0).saturating_mul(i.into())) - // Standard Error: 22 - .saturating_add(Weight::from_parts(1_774, 0).saturating_mul(s.into())) + // Minimum execution time: 3_909_680_000 picoseconds. + Weight::from_parts(446_471_160, 8740) + // Standard Error: 159 + .saturating_add(Weight::from_parts(101_085, 0).saturating_mul(c.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_598, 0).saturating_mul(i.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_879, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } @@ -2318,6 +2429,8 @@ impl WeightInfo for () { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -2329,24 +2442,26 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. /// The range of component `s` is `[0, 1048576]`. 
fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `563` // Estimated: `8982` - // Minimum execution time: 2_069_276_000 picoseconds. - Weight::from_parts(377_210_279, 8982) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_719, 0).saturating_mul(i.into())) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_728, 0).saturating_mul(s.into())) + // Minimum execution time: 1_968_545_000 picoseconds. + Weight::from_parts(420_048_028, 8982) + // Standard Error: 20 + .saturating_add(Weight::from_parts(1_685, 0).saturating_mul(i.into())) + // Standard Error: 20 + .saturating_add(Weight::from_parts(1_645, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2363,17 +2478,19 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `829` // Estimated: `9244` - // Minimum execution time: 199_037_000 picoseconds. - Weight::from_parts(213_931_000, 9244) + // Minimum execution time: 207_564_000 picoseconds. + Weight::from_parts(216_983_000, 9244) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:1 w:1) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) @@ -2383,10 +2500,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6085` - // Minimum execution time: 278_482_000 picoseconds. - Weight::from_parts(262_753_879, 6085) - // Standard Error: 112 - .saturating_add(Weight::from_parts(66_129, 0).saturating_mul(c.into())) + // Minimum execution time: 273_555_000 picoseconds. + Weight::from_parts(257_517_935, 6085) + // Standard Error: 148 + .saturating_add(Weight::from_parts(64_488, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -2407,10 +2524,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6085` - // Minimum execution time: 286_902_000 picoseconds. 
- Weight::from_parts(269_003_959, 6085) - // Standard Error: 123 - .saturating_add(Weight::from_parts(66_629, 0).saturating_mul(c.into())) + // Minimum execution time: 289_672_000 picoseconds. + Weight::from_parts(297_020_278, 6085) + // Standard Error: 86 + .saturating_add(Weight::from_parts(64_340, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -2419,7 +2536,7 @@ impl WeightInfo for () { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:1 w:1) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) @@ -2428,8 +2545,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 44_128_000 picoseconds. - Weight::from_parts(46_044_000, 3780) + // Minimum execution time: 45_930_000 picoseconds. + Weight::from_parts(47_288_000, 3780) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -2445,8 +2562,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 33_567_000 picoseconds. - Weight::from_parts(35_057_000, 8967) + // Minimum execution time: 35_421_000 picoseconds. + Weight::from_parts(36_909_000, 8967) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -2454,6 +2571,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2469,10 +2588,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `869 + r * (6 ±0)` // Estimated: `9284 + r * (6 ±0)` - // Minimum execution time: 272_376_000 picoseconds. - Weight::from_parts(284_162_161, 9284) - // Standard Error: 501 - .saturating_add(Weight::from_parts(341_575, 0).saturating_mul(r.into())) + // Minimum execution time: 275_531_000 picoseconds. 
+ Weight::from_parts(292_269_656, 9284) + // Standard Error: 672 + .saturating_add(Weight::from_parts(339_728, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2481,6 +2600,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2496,10 +2617,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `925 + r * (209 ±0)` // Estimated: `9304 + r * (2684 ±0)` - // Minimum execution time: 256_990_000 picoseconds. - Weight::from_parts(107_167_044, 9304) - // Standard Error: 7_545 - .saturating_add(Weight::from_parts(3_628_112, 0).saturating_mul(r.into())) + // Minimum execution time: 275_829_000 picoseconds. + Weight::from_parts(124_543_289, 9304) + // Standard Error: 6_085 + .saturating_add(Weight::from_parts(3_702_964, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -2509,6 +2630,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2524,10 +2647,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `924 + r * (213 ±0)` // Estimated: `9308 + r * (2688 ±0)` - // Minimum execution time: 272_889_000 picoseconds. - Weight::from_parts(110_341_799, 9308) - // Standard Error: 7_881 - .saturating_add(Weight::from_parts(4_427_043, 0).saturating_mul(r.into())) + // Minimum execution time: 276_666_000 picoseconds. 
+ Weight::from_parts(96_951_288, 9308) + // Standard Error: 8_876 + .saturating_add(Weight::from_parts(4_604_699, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -2537,6 +2660,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2552,10 +2677,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `876 + r * (6 ±0)` // Estimated: `9293 + r * (6 ±0)` - // Minimum execution time: 275_027_000 picoseconds. - Weight::from_parts(283_302_223, 9293) - // Standard Error: 1_179 - .saturating_add(Weight::from_parts(436_023, 0).saturating_mul(r.into())) + // Minimum execution time: 271_301_000 picoseconds. + Weight::from_parts(284_126_054, 9293) + // Standard Error: 886 + .saturating_add(Weight::from_parts(437_127, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2564,6 +2689,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2579,16 +2706,18 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `866 + r * (3 ±0)` // Estimated: `9282 + r * (3 ±0)` - // Minimum execution time: 270_137_000 picoseconds. - Weight::from_parts(282_756_362, 9282) - // Standard Error: 384 - .saturating_add(Weight::from_parts(171_660, 0).saturating_mul(r.into())) + // Minimum execution time: 274_778_000 picoseconds. 
+ Weight::from_parts(289_355_269, 9282) + // Standard Error: 382 + .saturating_add(Weight::from_parts(175_342, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2604,10 +2733,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `756 + r * (3 ±0)` // Estimated: `9171 + r * (3 ±0)` - // Minimum execution time: 255_131_000 picoseconds. - Weight::from_parts(269_207_006, 9171) - // Standard Error: 542 - .saturating_add(Weight::from_parts(161_597, 0).saturating_mul(r.into())) + // Minimum execution time: 257_310_000 picoseconds. + Weight::from_parts(276_410_847, 9171) + // Standard Error: 733 + .saturating_add(Weight::from_parts(160_094, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -2616,6 +2745,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2631,10 +2762,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `870 + r * (6 ±0)` // Estimated: `9285 + r * (6 ±0)` - // Minimum execution time: 272_172_000 picoseconds. - Weight::from_parts(283_747_134, 9285) - // Standard Error: 885 - .saturating_add(Weight::from_parts(343_968, 0).saturating_mul(r.into())) + // Minimum execution time: 278_305_000 picoseconds. 
+ Weight::from_parts(282_372_935, 9285) + // Standard Error: 1_154 + .saturating_add(Weight::from_parts(343_382, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2643,6 +2774,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2658,10 +2791,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `866 + r * (6 ±0)` // Estimated: `9284 + r * (6 ±0)` - // Minimum execution time: 263_220_000 picoseconds. - Weight::from_parts(277_062_382, 9284) - // Standard Error: 1_783 - .saturating_add(Weight::from_parts(399_631, 0).saturating_mul(r.into())) + // Minimum execution time: 280_349_000 picoseconds. + Weight::from_parts(294_864_875, 9284) + // Standard Error: 1_010 + .saturating_add(Weight::from_parts(368_740, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2670,6 +2803,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:2 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2685,10 +2820,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1010 + r * (6 ±0)` // Estimated: `9409 + r * (6 ±0)` - // Minimum execution time: 253_218_000 picoseconds. - Weight::from_parts(282_251_452, 9409) - // Standard Error: 2_367 - .saturating_add(Weight::from_parts(1_604_060, 0).saturating_mul(r.into())) + // Minimum execution time: 274_578_000 picoseconds. 
+ Weight::from_parts(313_285_034, 9409) + // Standard Error: 2_800 + .saturating_add(Weight::from_parts(1_653_468, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2697,6 +2832,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2712,10 +2849,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `880 + r * (6 ±0)` // Estimated: `9301 + r * (6 ±0)` - // Minimum execution time: 271_797_000 picoseconds. - Weight::from_parts(288_594_393, 9301) - // Standard Error: 846 - .saturating_add(Weight::from_parts(335_063, 0).saturating_mul(r.into())) + // Minimum execution time: 287_127_000 picoseconds. + Weight::from_parts(290_489_909, 9301) + // Standard Error: 864 + .saturating_add(Weight::from_parts(332_977, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2724,6 +2861,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2739,10 +2878,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `878 + r * (6 ±0)` // Estimated: `9294 + r * (6 ±0)` - // Minimum execution time: 254_997_000 picoseconds. - Weight::from_parts(284_331_894, 9294) - // Standard Error: 885 - .saturating_add(Weight::from_parts(337_073, 0).saturating_mul(r.into())) + // Minimum execution time: 273_291_000 picoseconds. 
+ Weight::from_parts(293_650_716, 9294) + // Standard Error: 725 + .saturating_add(Weight::from_parts(323_281, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2751,6 +2890,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2766,10 +2907,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `875 + r * (6 ±0)` // Estimated: `9297 + r * (6 ±0)` - // Minimum execution time: 273_845_000 picoseconds. - Weight::from_parts(285_291_242, 9297) - // Standard Error: 690 - .saturating_add(Weight::from_parts(332_783, 0).saturating_mul(r.into())) + // Minimum execution time: 282_061_000 picoseconds. + Weight::from_parts(291_729_751, 9297) + // Standard Error: 929 + .saturating_add(Weight::from_parts(324_683, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2778,6 +2919,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2793,10 +2936,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `866 + r * (6 ±0)` // Estimated: `9282 + r * (6 ±0)` - // Minimum execution time: 268_302_000 picoseconds. - Weight::from_parts(280_463_257, 9282) - // Standard Error: 1_035 - .saturating_add(Weight::from_parts(345_811, 0).saturating_mul(r.into())) + // Minimum execution time: 264_505_000 picoseconds. 
+ Weight::from_parts(293_440_286, 9282) + // Standard Error: 704 + .saturating_add(Weight::from_parts(329_851, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2805,6 +2948,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2822,10 +2967,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `940 + r * (14 ±0)` // Estimated: `9350 + r * (14 ±0)` - // Minimum execution time: 263_433_000 picoseconds. - Weight::from_parts(290_098_370, 9350) - // Standard Error: 1_905 - .saturating_add(Weight::from_parts(805_299, 0).saturating_mul(r.into())) + // Minimum execution time: 277_208_000 picoseconds. + Weight::from_parts(304_294_691, 9350) + // Standard Error: 1_083 + .saturating_add(Weight::from_parts(824_245, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 14).saturating_mul(r.into())) @@ -2834,6 +2979,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2849,10 +2996,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `868 + r * (6 ±0)` // Estimated: `9285 + r * (6 ±0)` - // Minimum execution time: 273_588_000 picoseconds. - Weight::from_parts(287_133_565, 9285) - // Standard Error: 799 - .saturating_add(Weight::from_parts(254_114, 0).saturating_mul(r.into())) + // Minimum execution time: 278_293_000 picoseconds. 
+ Weight::from_parts(289_743_005, 9285) + // Standard Error: 672 + .saturating_add(Weight::from_parts(267_553, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2861,6 +3008,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2876,10 +3025,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `872` // Estimated: `9287` - // Minimum execution time: 257_843_000 picoseconds. - Weight::from_parts(228_177_495, 9287) - // Standard Error: 24 - .saturating_add(Weight::from_parts(985, 0).saturating_mul(n.into())) + // Minimum execution time: 279_495_000 picoseconds. + Weight::from_parts(232_736_994, 9287) + // Standard Error: 23 + .saturating_add(Weight::from_parts(1_008, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -2887,6 +3036,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2902,10 +3053,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `856 + r * (45 ±0)` // Estimated: `9271 + r * (45 ±0)` - // Minimum execution time: 248_332_000 picoseconds. - Weight::from_parts(274_998_906, 9271) - // Standard Error: 949_561 - .saturating_add(Weight::from_parts(5_570_693, 0).saturating_mul(r.into())) + // Minimum execution time: 257_920_000 picoseconds. 
+ Weight::from_parts(282_276_265, 9271) + // Standard Error: 948_490 + .saturating_add(Weight::from_parts(2_408_134, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) @@ -2914,6 +3065,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2929,10 +3082,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `866` // Estimated: `9288` - // Minimum execution time: 272_055_000 picoseconds. - Weight::from_parts(282_280_090, 9288) - // Standard Error: 1 - .saturating_add(Weight::from_parts(330, 0).saturating_mul(n.into())) + // Minimum execution time: 278_149_000 picoseconds. + Weight::from_parts(287_020_337, 9288) + // Standard Error: 0 + .saturating_add(Weight::from_parts(321, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -2940,9 +3093,11 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:1 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) + /// Storage: `Contracts::CodeInfoOf` (r:33 w:33) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) @@ -2953,28 +3108,30 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `Contracts::DeletionQueue` (r:0 w:1) /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2902 + r * (529 ±0)` - // Estimated: `11317 + r * (5479 ±0)` - // Minimum execution time: 274_842_000 picoseconds. 
- Weight::from_parts(299_824_497, 11317) - // Standard Error: 902_686 - .saturating_add(Weight::from_parts(106_562_902, 0).saturating_mul(r.into())) + // Measured: `4805 + r * (2121 ±0)` + // Estimated: `13220 + r * (81321 ±0)` + // Minimum execution time: 307_763_000 picoseconds. + Weight::from_parts(323_648_618, 13220) + // Standard Error: 879_890 + .saturating_add(Weight::from_parts(249_045_481, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().reads((36_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(RocksDbWeight::get().writes((10_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 5266).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().writes((41_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 81321).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2992,10 +3149,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `947 + r * (10 ±0)` // Estimated: `9363 + r * (10 ±0)` - // Minimum execution time: 255_393_000 picoseconds. - Weight::from_parts(309_814_338, 9363) - // Standard Error: 2_978 - .saturating_add(Weight::from_parts(1_203_507, 0).saturating_mul(r.into())) + // Minimum execution time: 278_400_000 picoseconds. + Weight::from_parts(293_743_000, 9363) + // Standard Error: 1_686 + .saturating_add(Weight::from_parts(1_288_603, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) @@ -3004,6 +3161,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3019,10 +3178,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `866 + r * (10 ±0)` // Estimated: `9283 + r * (10 ±0)` - // Minimum execution time: 250_378_000 picoseconds. - Weight::from_parts(287_003_144, 9283) - // Standard Error: 2_726 - .saturating_add(Weight::from_parts(1_850_967, 0).saturating_mul(r.into())) + // Minimum execution time: 272_110_000 picoseconds. 
+ Weight::from_parts(295_620_726, 9283) + // Standard Error: 5_481 + .saturating_add(Weight::from_parts(2_031_955, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) @@ -3031,6 +3190,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3047,12 +3208,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `883 + t * (32 ±0)` // Estimated: `9303 + t * (2508 ±0)` - // Minimum execution time: 266_787_000 picoseconds. - Weight::from_parts(284_368_661, 9303) - // Standard Error: 121_315 - .saturating_add(Weight::from_parts(2_690_211, 0).saturating_mul(t.into())) - // Standard Error: 33 - .saturating_add(Weight::from_parts(789, 0).saturating_mul(n.into())) + // Minimum execution time: 277_409_000 picoseconds. + Weight::from_parts(293_838_037, 9303) + // Standard Error: 87_977 + .saturating_add(Weight::from_parts(2_911_340, 0).saturating_mul(t.into())) + // Standard Error: 24 + .saturating_add(Weight::from_parts(531, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3063,6 +3224,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3078,10 +3241,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `865 + r * (7 ±0)` // Estimated: `9285 + r * (7 ±0)` - // Minimum execution time: 166_804_000 picoseconds. - Weight::from_parts(175_118_291, 9285) - // Standard Error: 398 - .saturating_add(Weight::from_parts(220_961, 0).saturating_mul(r.into())) + // Minimum execution time: 172_634_000 picoseconds. 
+ Weight::from_parts(183_322_840, 9285) + // Standard Error: 384 + .saturating_add(Weight::from_parts(226_007, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) @@ -3090,6 +3253,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3105,10 +3270,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `125816` // Estimated: `131758` - // Minimum execution time: 398_436_000 picoseconds. - Weight::from_parts(385_003_285, 131758) + // Minimum execution time: 425_713_000 picoseconds. + Weight::from_parts(394_260_924, 131758) // Standard Error: 12 - .saturating_add(Weight::from_parts(1_038, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_032, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3119,10 +3284,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `927 + r * (292 ±0)` // Estimated: `929 + r * (293 ±0)` - // Minimum execution time: 274_099_000 picoseconds. - Weight::from_parts(174_282_817, 929) - // Standard Error: 10_547 - .saturating_add(Weight::from_parts(6_262_173, 0).saturating_mul(r.into())) + // Minimum execution time: 268_128_000 picoseconds. + Weight::from_parts(186_787_113, 929) + // Standard Error: 10_796 + .saturating_add(Weight::from_parts(6_454_780, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3136,10 +3301,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1450` // Estimated: `1433` - // Minimum execution time: 274_061_000 picoseconds. - Weight::from_parts(334_755_333, 1433) - // Standard Error: 66 - .saturating_add(Weight::from_parts(771, 0).saturating_mul(n.into())) + // Minimum execution time: 286_565_000 picoseconds. + Weight::from_parts(349_504_932, 1433) + // Standard Error: 70 + .saturating_add(Weight::from_parts(530, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } @@ -3150,8 +3315,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1256 + n * (1 ±0)` // Estimated: `1256 + n * (1 ±0)` - // Minimum execution time: 272_657_000 picoseconds. - Weight::from_parts(299_061_594, 1256) + // Minimum execution time: 282_478_000 picoseconds. 
+ Weight::from_parts(303_448_260, 1256) + // Standard Error: 34 + .saturating_add(Weight::from_parts(712, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -3163,10 +3330,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `924 + r * (288 ±0)` // Estimated: `930 + r * (289 ±0)` - // Minimum execution time: 270_524_000 picoseconds. - Weight::from_parts(177_959_667, 930) - // Standard Error: 10_397 - .saturating_add(Weight::from_parts(6_270_181, 0).saturating_mul(r.into())) + // Minimum execution time: 271_793_000 picoseconds. + Weight::from_parts(179_158_648, 930) + // Standard Error: 11_868 + .saturating_add(Weight::from_parts(6_397_986, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3180,8 +3347,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1252 + n * (1 ±0)` // Estimated: `1252 + n * (1 ±0)` - // Minimum execution time: 270_757_000 picoseconds. - Weight::from_parts(298_627_896, 1252) + // Minimum execution time: 273_945_000 picoseconds. + Weight::from_parts(299_855_996, 1252) + // Standard Error: 31 + .saturating_add(Weight::from_parts(309, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -3193,10 +3362,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `924 + r * (296 ±0)` // Estimated: `926 + r * (297 ±0)` - // Minimum execution time: 268_082_000 picoseconds. - Weight::from_parts(202_583_730, 926) - // Standard Error: 9_029 - .saturating_add(Weight::from_parts(5_028_517, 0).saturating_mul(r.into())) + // Minimum execution time: 275_285_000 picoseconds. + Weight::from_parts(207_735_572, 926) + // Standard Error: 9_736 + .saturating_add(Weight::from_parts(5_162_837, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3209,10 +3378,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1268 + n * (1 ±0)` // Estimated: `1268 + n * (1 ±0)` - // Minimum execution time: 274_100_000 picoseconds. - Weight::from_parts(296_981_515, 1268) + // Minimum execution time: 278_929_000 picoseconds. + Weight::from_parts(302_251_674, 1268) // Standard Error: 34 - .saturating_add(Weight::from_parts(578, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(583, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -3224,10 +3393,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `935 + r * (288 ±0)` // Estimated: `932 + r * (289 ±0)` - // Minimum execution time: 269_697_000 picoseconds. - Weight::from_parts(198_346_516, 932) - // Standard Error: 8_623 - .saturating_add(Weight::from_parts(4_964_116, 0).saturating_mul(r.into())) + // Minimum execution time: 258_476_000 picoseconds. 
+ Weight::from_parts(209_578_051, 932) + // Standard Error: 8_255 + .saturating_add(Weight::from_parts(4_942_572, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3240,10 +3409,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1255 + n * (1 ±0)` // Estimated: `1255 + n * (1 ±0)` - // Minimum execution time: 264_210_000 picoseconds. - Weight::from_parts(291_387_652, 1255) - // Standard Error: 36 - .saturating_add(Weight::from_parts(524, 0).saturating_mul(n.into())) + // Minimum execution time: 273_089_000 picoseconds. + Weight::from_parts(302_452_604, 1255) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -3255,10 +3422,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `917 + r * (296 ±0)` // Estimated: `922 + r * (297 ±0)` - // Minimum execution time: 267_219_000 picoseconds. - Weight::from_parts(175_866_982, 922) - // Standard Error: 11_275 - .saturating_add(Weight::from_parts(6_424_755, 0).saturating_mul(r.into())) + // Minimum execution time: 274_301_000 picoseconds. + Weight::from_parts(172_245_469, 922) + // Standard Error: 11_306 + .saturating_add(Weight::from_parts(6_526_825, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3272,10 +3439,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1269 + n * (1 ±0)` // Estimated: `1269 + n * (1 ±0)` - // Minimum execution time: 274_634_000 picoseconds. - Weight::from_parts(294_272_009, 1269) + // Minimum execution time: 280_399_000 picoseconds. + Weight::from_parts(305_970_974, 1269) // Standard Error: 36 - .saturating_add(Weight::from_parts(1_219, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(568, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -3284,6 +3451,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1602 w:1601) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3299,10 +3468,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1418 + r * (45 ±0)` // Estimated: `9785 + r * (2520 ±0)` - // Minimum execution time: 266_833_000 picoseconds. - Weight::from_parts(186_049_741, 9785) - // Standard Error: 40_311 - .saturating_add(Weight::from_parts(31_365_585, 0).saturating_mul(r.into())) + // Minimum execution time: 258_452_000 picoseconds. 
+ Weight::from_parts(276_401_000, 9785) + // Standard Error: 65_648 + .saturating_add(Weight::from_parts(33_890_852, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -3313,6 +3482,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) @@ -3328,10 +3499,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1263 + r * (245 ±0)` // Estimated: `9635 + r * (2721 ±0)` - // Minimum execution time: 272_140_000 picoseconds. - Weight::from_parts(275_009_000, 9635) - // Standard Error: 99_187 - .saturating_add(Weight::from_parts(240_702_479, 0).saturating_mul(r.into())) + // Minimum execution time: 281_394_000 picoseconds. + Weight::from_parts(286_475_000, 9635) + // Standard Error: 156_302 + .saturating_add(Weight::from_parts(250_370_283, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -3342,6 +3513,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:736 w:0) @@ -3357,10 +3530,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + r * (576 ±0)` // Estimated: `9290 + r * (2637 ±10)` - // Minimum execution time: 263_664_000 picoseconds. - Weight::from_parts(277_240_000, 9290) - // Standard Error: 169_147 - .saturating_add(Weight::from_parts(240_084_929, 0).saturating_mul(r.into())) + // Minimum execution time: 278_193_000 picoseconds. 
+ Weight::from_parts(280_814_000, 9290) + // Standard Error: 164_401 + .saturating_add(Weight::from_parts(251_272_834, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3371,6 +3544,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) @@ -3387,12 +3562,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1310 + t * (277 ±0)` // Estimated: `12200 + t * (5227 ±0)` - // Minimum execution time: 464_644_000 picoseconds. - Weight::from_parts(40_377_313, 12200) - // Standard Error: 12_060_226 - .saturating_add(Weight::from_parts(380_635_982, 0).saturating_mul(t.into())) + // Minimum execution time: 476_812_000 picoseconds. + Weight::from_parts(70_715_306, 12200) + // Standard Error: 12_232_109 + .saturating_add(Weight::from_parts(374_277_042, 0).saturating_mul(t.into())) // Standard Error: 17 - .saturating_add(Weight::from_parts(1_014, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_022, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(16_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -3403,6 +3578,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:802 w:802) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:801 w:800) @@ -3416,16 +3593,16 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:803 w:803) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:800 w:800) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `r` is `[1, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1281 + r * (255 ±0)` // Estimated: `9623 + r * (2731 ±0)` - // Minimum execution time: 641_845_000 picoseconds. - Weight::from_parts(649_908_000, 9623) - // Standard Error: 278_514 - .saturating_add(Weight::from_parts(361_164_598, 0).saturating_mul(r.into())) + // Minimum execution time: 656_480_000 picoseconds. 
+ Weight::from_parts(668_579_000, 9623) + // Standard Error: 365_458 + .saturating_add(Weight::from_parts(379_238_223, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(7_u64)) @@ -3436,6 +3613,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:1) @@ -3449,7 +3628,7 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 983040]`. /// The range of component `s` is `[0, 983040]`. @@ -3457,12 +3636,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1306 + t * (104 ±0)` // Estimated: `12214 + t * (2549 ±1)` - // Minimum execution time: 2_111_632_000 picoseconds. - Weight::from_parts(1_213_125_745, 12214) - // Standard Error: 22 - .saturating_add(Weight::from_parts(1_055, 0).saturating_mul(i.into())) - // Standard Error: 22 - .saturating_add(Weight::from_parts(1_192, 0).saturating_mul(s.into())) + // Minimum execution time: 2_148_964_000 picoseconds. + Weight::from_parts(1_557_685_999, 12214) + // Standard Error: 36 + .saturating_add(Weight::from_parts(864, 0).saturating_mul(i.into())) + // Standard Error: 36 + .saturating_add(Weight::from_parts(1_092, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(19_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(11_u64)) @@ -3473,6 +3652,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3488,10 +3669,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `865 + r * (8 ±0)` // Estimated: `9279 + r * (8 ±0)` - // Minimum execution time: 264_227_000 picoseconds. 
- Weight::from_parts(281_015_995, 9279) - // Standard Error: 710 - .saturating_add(Weight::from_parts(365_734, 0).saturating_mul(r.into())) + // Minimum execution time: 279_377_000 picoseconds. + Weight::from_parts(287_951_287, 9279) + // Standard Error: 659 + .saturating_add(Weight::from_parts(376_476, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -3500,6 +3681,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3515,10 +3698,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `873` // Estimated: `9286` - // Minimum execution time: 270_709_000 picoseconds. - Weight::from_parts(268_416_051, 9286) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_080, 0).saturating_mul(n.into())) + // Minimum execution time: 276_151_000 picoseconds. + Weight::from_parts(267_656_959, 9286) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_108, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3526,6 +3709,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3541,10 +3726,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `867 + r * (8 ±0)` // Estimated: `9284 + r * (8 ±0)` - // Minimum execution time: 267_283_000 picoseconds. - Weight::from_parts(280_706_659, 9284) - // Standard Error: 540 - .saturating_add(Weight::from_parts(790_312, 0).saturating_mul(r.into())) + // Minimum execution time: 275_247_000 picoseconds. 
+ Weight::from_parts(286_675_317, 9284) + // Standard Error: 601 + .saturating_add(Weight::from_parts(788_160, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -3553,6 +3738,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3568,10 +3755,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `875` // Estimated: `9292` - // Minimum execution time: 262_010_000 picoseconds. - Weight::from_parts(273_278_744, 9292) + // Minimum execution time: 281_585_000 picoseconds. + Weight::from_parts(287_637_844, 9292) // Standard Error: 1 - .saturating_add(Weight::from_parts(3_362, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_351, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3579,6 +3766,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3594,10 +3783,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `867 + r * (8 ±0)` // Estimated: `9286 + r * (8 ±0)` - // Minimum execution time: 268_698_000 picoseconds. - Weight::from_parts(282_890_578, 9286) - // Standard Error: 622 - .saturating_add(Weight::from_parts(441_131, 0).saturating_mul(r.into())) + // Minimum execution time: 273_678_000 picoseconds. 
+ Weight::from_parts(289_879_306, 9286) + // Standard Error: 607 + .saturating_add(Weight::from_parts(439_482, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -3606,6 +3795,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3621,10 +3812,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `875` // Estimated: `9291` - // Minimum execution time: 252_846_000 picoseconds. - Weight::from_parts(267_657_561, 9291) + // Minimum execution time: 275_126_000 picoseconds. + Weight::from_parts(276_684_594, 9291) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_208, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3632,6 +3823,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3647,10 +3840,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `867 + r * (8 ±0)` // Estimated: `9283 + r * (8 ±0)` - // Minimum execution time: 266_899_000 picoseconds. - Weight::from_parts(276_862_067, 9283) - // Standard Error: 1_249 - .saturating_add(Weight::from_parts(443_970, 0).saturating_mul(r.into())) + // Minimum execution time: 273_229_000 picoseconds. 
+ Weight::from_parts(287_793_841, 9283) + // Standard Error: 451 + .saturating_add(Weight::from_parts(447_922, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -3659,6 +3852,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3674,10 +3869,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `875` // Estimated: `9289` - // Minimum execution time: 265_413_000 picoseconds. - Weight::from_parts(265_600_840, 9289) + // Minimum execution time: 277_843_000 picoseconds. + Weight::from_parts(279_900_099, 9289) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_203, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_204, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3685,6 +3880,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3700,10 +3897,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1000 + n * (1 ±0)` // Estimated: `9412 + n * (1 ±0)` - // Minimum execution time: 328_341_000 picoseconds. - Weight::from_parts(341_038_581, 9412) - // Standard Error: 10 - .saturating_add(Weight::from_parts(5_863, 0).saturating_mul(n.into())) + // Minimum execution time: 331_840_000 picoseconds. 
+ Weight::from_parts(338_767_191, 9412) + // Standard Error: 11 + .saturating_add(Weight::from_parts(5_971, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -3712,6 +3909,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3727,10 +3926,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `808 + r * (112 ±0)` // Estimated: `9226 + r * (112 ±0)` - // Minimum execution time: 272_730_000 picoseconds. - Weight::from_parts(339_349_232, 9226) - // Standard Error: 13_004 - .saturating_add(Weight::from_parts(41_649_026, 0).saturating_mul(r.into())) + // Minimum execution time: 277_912_000 picoseconds. + Weight::from_parts(344_538_960, 9226) + // Standard Error: 13_422 + .saturating_add(Weight::from_parts(41_592_887, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) @@ -3739,6 +3938,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3754,10 +3955,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `910 + r * (76 ±0)` // Estimated: `9279 + r * (77 ±0)` - // Minimum execution time: 273_892_000 picoseconds. - Weight::from_parts(352_853_960, 9279) - // Standard Error: 16_745 - .saturating_add(Weight::from_parts(45_853_294, 0).saturating_mul(r.into())) + // Minimum execution time: 280_383_000 picoseconds. 
+ Weight::from_parts(348_542_377, 9279) + // Standard Error: 13_985 + .saturating_add(Weight::from_parts(45_983_827, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) @@ -3766,6 +3967,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3781,10 +3984,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `880 + r * (42 ±0)` // Estimated: `9294 + r * (42 ±0)` - // Minimum execution time: 268_431_000 picoseconds. - Weight::from_parts(319_727_796, 9294) - // Standard Error: 10_276 - .saturating_add(Weight::from_parts(11_928_882, 0).saturating_mul(r.into())) + // Minimum execution time: 277_764_000 picoseconds. + Weight::from_parts(320_288_180, 9294) + // Standard Error: 10_140 + .saturating_add(Weight::from_parts(12_046_137, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) @@ -3793,6 +3996,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) @@ -3808,10 +4013,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + r * (965 ±0)` // Estimated: `9285 + r * (3090 ±7)` - // Minimum execution time: 275_482_000 picoseconds. - Weight::from_parts(279_155_000, 9285) - // Standard Error: 65_388 - .saturating_add(Weight::from_parts(26_634_187, 0).saturating_mul(r.into())) + // Minimum execution time: 271_356_000 picoseconds. 
+ Weight::from_parts(282_924_000, 9285) + // Standard Error: 60_493 + .saturating_add(Weight::from_parts(28_319_267, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3822,6 +4027,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) @@ -3837,20 +4044,22 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `937 + r * (131 ±0)` // Estimated: `9346 + r * (2607 ±0)` - // Minimum execution time: 270_001_000 picoseconds. - Weight::from_parts(286_792_689, 9346) - // Standard Error: 21_074 - .saturating_add(Weight::from_parts(6_465_885, 0).saturating_mul(r.into())) + // Minimum execution time: 269_698_000 picoseconds. + Weight::from_parts(294_325_127, 9346) + // Standard Error: 22_352 + .saturating_add(Weight::from_parts(6_744_117, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2606).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2607).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) @@ -3864,12 +4073,12 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 32]`. fn unlock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `969 + r * (183 ±0)` + // Measured: `972 + r * (184 ±0)` // Estimated: `129453 + r * (2568 ±0)` - // Minimum execution time: 270_652_000 picoseconds. - Weight::from_parts(293_369_452, 129453) - // Standard Error: 24_321 - .saturating_add(Weight::from_parts(5_575_600, 0).saturating_mul(r.into())) + // Minimum execution time: 261_226_000 picoseconds. 
+ Weight::from_parts(294_299_527, 129453) + // Standard Error: 27_898 + .saturating_add(Weight::from_parts(6_031_601, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3880,6 +4089,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3895,10 +4106,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `861 + r * (3 ±0)` // Estimated: `9282 + r * (3 ±0)` - // Minimum execution time: 267_749_000 picoseconds. - Weight::from_parts(280_373_341, 9282) - // Standard Error: 464 - .saturating_add(Weight::from_parts(170_398, 0).saturating_mul(r.into())) + // Minimum execution time: 270_729_000 picoseconds. + Weight::from_parts(289_622_807, 9282) + // Standard Error: 394 + .saturating_add(Weight::from_parts(167_010, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -3907,6 +4118,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3922,10 +4135,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2112 + r * (39 ±0)` // Estimated: `10377 + r * (40 ±0)` - // Minimum execution time: 270_260_000 picoseconds. - Weight::from_parts(337_969_172, 10377) - // Standard Error: 1_557 - .saturating_add(Weight::from_parts(305_735, 0).saturating_mul(r.into())) + // Minimum execution time: 272_228_000 picoseconds. 
+ Weight::from_parts(351_059_276, 10377) + // Standard Error: 1_761 + .saturating_add(Weight::from_parts(312_269, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) @@ -3934,6 +4147,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3951,10 +4166,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `864 + r * (3 ±0)` // Estimated: `9279 + r * (3 ±0)` - // Minimum execution time: 265_607_000 picoseconds. - Weight::from_parts(283_396_630, 9279) - // Standard Error: 449 - .saturating_add(Weight::from_parts(149_018, 0).saturating_mul(r.into())) + // Minimum execution time: 272_497_000 picoseconds. + Weight::from_parts(288_213_060, 9279) + // Standard Error: 469 + .saturating_add(Weight::from_parts(155_530, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -3964,9 +4179,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_813_000 picoseconds. - Weight::from_parts(1_264_945, 0) - // Standard Error: 19 - .saturating_add(Weight::from_parts(15_098, 0).saturating_mul(r.into())) + // Minimum execution time: 1_990_000 picoseconds. 
+ Weight::from_parts(1_778_221, 0) + // Standard Error: 26 + .saturating_add(Weight::from_parts(14_888, 0).saturating_mul(r.into())) } } diff --git a/substrate/frame/contracts/uapi/Cargo.toml b/substrate/frame/contracts/uapi/Cargo.toml index 12bb6b8fc2c0..d9a5ee14f054 100644 --- a/substrate/frame/contracts/uapi/Cargo.toml +++ b/substrate/frame/contracts/uapi/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] paste = { version = "1.0", default-features = false } bitflags = "1.0" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"], optional = true } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"], optional = true } scale = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len", diff --git a/substrate/frame/contracts/uapi/src/host.rs b/substrate/frame/contracts/uapi/src/host.rs index 04f58895ab4f..459cb59bead9 100644 --- a/substrate/frame/contracts/uapi/src/host.rs +++ b/substrate/frame/contracts/uapi/src/host.rs @@ -790,7 +790,7 @@ pub trait HostFn { /// /// # Parameters /// /// - `dest`: The XCM destination, should be decodable as [VersionedMultiLocation](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedMultiLocation.html), + /// - `dest`: The XCM destination, should be decodable as [VersionedLocation](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedLocation.html), /// traps otherwise. /// - `msg`: The message, should be decodable as a [VersionedXcm](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedXcm.html), /// traps otherwise. diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml index eb8a009ed2ad..5402e387dcfd 100644 --- a/substrate/frame/conviction-voting/Cargo.toml +++ b/substrate/frame/conviction-voting/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", "max-encoded-len", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs index 5c582a35362a..74baeace898b 100644 --- a/substrate/frame/conviction-voting/src/tests.rs +++ b/substrate/frame/conviction-voting/src/tests.rs @@ -39,7 +39,7 @@ frame_support::construct_runtime!( } ); -// Test that a fitlered call can be dispatched. +// Test that a filtered call can be dispatched.
pub struct BaseFilter; impl Contains<RuntimeCall> for BaseFilter { fn contains(call: &RuntimeCall) -> bool { diff --git a/substrate/frame/core-fellowship/Cargo.toml b/substrate/frame/core-fellowship/Cargo.toml index d5f7347aad1e..055d7fc296ae 100644 --- a/substrate/frame/core-fellowship/Cargo.toml +++ b/substrate/frame/core-fellowship/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/core-fellowship/src/benchmarking.rs b/substrate/frame/core-fellowship/src/benchmarking.rs index ddde70bd7ce1..fd5453310be5 100644 --- a/substrate/frame/core-fellowship/src/benchmarking.rs +++ b/substrate/frame/core-fellowship/src/benchmarking.rs @@ -149,6 +149,11 @@ mod benchmarks { #[benchmark] fn promote() -> Result<(), BenchmarkError> { + // Ensure that the `min_promotion_period` won't get in our way. + let mut params = Params::<T, I>::get(); + params.min_promotion_period = [Zero::zero(); RANK_COUNT]; + Params::<T, I>::put(&params); + let member = make_member::<T, I>(1)?; ensure_evidence::<T, I>(&member)?; diff --git a/substrate/frame/core-fellowship/src/lib.rs b/substrate/frame/core-fellowship/src/lib.rs index 19b919ba34c1..92006d5eceda 100644 --- a/substrate/frame/core-fellowship/src/lib.rs +++ b/substrate/frame/core-fellowship/src/lib.rs @@ -152,7 +152,8 @@ pub mod pallet { }; use frame_system::{ensure_root, pallet_prelude::*}; - const RANK_COUNT: usize = 9; + /// Number of available ranks. + pub(crate) const RANK_COUNT: usize = 9; #[pallet::pallet] pub struct Pallet<T, I = ()>(PhantomData<(T, I)>); diff --git a/substrate/frame/core-fellowship/src/tests/integration.rs b/substrate/frame/core-fellowship/src/tests/integration.rs index 3c2dc1de5955..d3bbac158056 100644 --- a/substrate/frame/core-fellowship/src/tests/integration.rs +++ b/substrate/frame/core-fellowship/src/tests/integration.rs @@ -251,7 +251,7 @@ fn swap_exhaustive_works() { }); assert_eq!(root_add, root_swap); - // Ensure that we dont compare trivial stuff like `()` from a type error above. + // Ensure that we don't compare trivial stuff like `()` from a type error above.
assert_eq!(root_add.len(), 32); }); } diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml index e38ff8c077c5..164758b79fe3 100644 --- a/substrate/frame/democracy/Cargo.toml +++ b/substrate/frame/democracy/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/democracy/src/lib.rs b/substrate/frame/democracy/src/lib.rs index 566d1fbbf6e3..575f5029752d 100644 --- a/substrate/frame/democracy/src/lib.rs +++ b/substrate/frame/democracy/src/lib.rs @@ -486,7 +486,7 @@ pub mod pallet { Blacklisted { proposal_hash: T::Hash }, /// An account has voted in a referendum Voted { voter: T::AccountId, ref_index: ReferendumIndex, vote: AccountVote<BalanceOf<T>> }, - /// An account has secconded a proposal + /// An account has seconded a proposal Seconded { seconder: T::AccountId, prop_index: PropIndex }, /// A proposal got canceled. ProposalCanceled { prop_index: PropIndex }, diff --git a/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs b/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs index 45d719b1ae60..eb8fdb8700c8 100644 --- a/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs +++ b/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs @@ -323,40 +323,40 @@ mod test { } #[test] - fn unreserve_works_for_depositer() { - let depositer_0 = 10; - let depositer_1 = 11; + fn unreserve_works_for_depositor() { + let depositor_0 = 10; + let depositor_1 = 11; let deposit = 25; - let depositer_0_initial_reserved = 0; - let depositer_1_initial_reserved = 15; + let depositor_0_initial_reserved = 0; + let depositor_1_initial_reserved = 15; let initial_balance = 100_000; new_test_ext().execute_with(|| { // Set up initial state. - <Test as Config>::Currency::make_free_balance_be(&depositer_0, initial_balance); - <Test as Config>::Currency::make_free_balance_be(&depositer_1, initial_balance); + <Test as Config>::Currency::make_free_balance_be(&depositor_0, initial_balance); + <Test as Config>::Currency::make_free_balance_be(&depositor_1, initial_balance); assert_ok!(<Test as Config>::Currency::reserve( - &depositer_0, - depositer_0_initial_reserved + deposit + &depositor_0, + depositor_0_initial_reserved + deposit )); assert_ok!(<Test as Config>::Currency::reserve( - &depositer_1, - depositer_1_initial_reserved + deposit + &depositor_1, + depositor_1_initial_reserved + deposit )); let depositors = BoundedVec::<_, <Test as Config>::MaxDeposits>::truncate_from(vec![ - depositer_0, - depositer_1, + depositor_0, + depositor_1, ]); DepositOf::<Test>::insert(0, (depositors, deposit)); // Sanity check: ensure initial reserved balance was set correctly. assert_eq!( - <Test as Config>::Currency::reserved_balance(&depositer_0), - depositer_0_initial_reserved + deposit + <Test as Config>::Currency::reserved_balance(&depositor_0), + depositor_0_initial_reserved + deposit ); assert_eq!( - <Test as Config>::Currency::reserved_balance(&depositer_1), - depositer_1_initial_reserved + deposit + <Test as Config>::Currency::reserved_balance(&depositor_1), + depositor_1_initial_reserved + deposit ); // Run the migration.
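The hunk above stops just before the unchanged test lines that actually execute the migration. For orientation, tests of this shape drive a migration through the `OnRuntimeUpgrade` trait; the following is a hypothetical sketch of that step, not the elided lines themselves (`UnlockAndUnreserveAllFunds` is assumed from this file's subject, `Test` from the surrounding mock):

use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};

#[test]
fn runs_the_migration() {
    new_test_ext().execute_with(|| {
        // ...state set up as in the hunks above...
        // Run the migration under test and capture the weight it reports.
        let weight = UnlockAndUnreserveAllFunds::<Test>::on_runtime_upgrade();
        // A migration that touched storage should report a non-zero weight.
        assert!(weight.any_gt(Weight::zero()));
    });
}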
@@ -367,12 +367,12 @@ mod test { // Assert the reserved balance was reduced by the expected amount. assert_eq!( - <Test as Config>::Currency::reserved_balance(&depositer_0), - depositer_0_initial_reserved + <Test as Config>::Currency::reserved_balance(&depositor_0), + depositor_0_initial_reserved ); assert_eq!( - <Test as Config>::Currency::reserved_balance(&depositer_1), - depositer_1_initial_reserved + <Test as Config>::Currency::reserved_balance(&depositor_1), + depositor_1_initial_reserved ); }); } diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index dd69f48dbc18..e2946ba98156 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -62,7 +62,7 @@ frame_support::construct_runtime!( } ); -// Test that a fitlered call can be dispatched. +// Test that a filtered call can be dispatched. pub struct BaseFilter; impl Contains<RuntimeCall> for BaseFilter { fn contains(call: &RuntimeCall) -> bool { diff --git a/substrate/frame/democracy/src/tests/cancellation.rs b/substrate/frame/democracy/src/tests/cancellation.rs index 4384fe6a1641..b4c42f9c7905 100644 --- a/substrate/frame/democracy/src/tests/cancellation.rs +++ b/substrate/frame/democracy/src/tests/cancellation.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! The tests for cancelation functionality. +//! The tests for cancellation functionality. use super::*; diff --git a/substrate/frame/election-provider-multi-phase/Cargo.toml b/substrate/frame/election-provider-multi-phase/Cargo.toml index 87186d08da39..0169da9c1739 100644 --- a/substrate/frame/election-provider-multi-phase/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } log = { workspace = true } @@ -37,7 +37,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } pallet-election-provider-support-benchmarking = { path = "../election-provider-support/benchmarking", default-features = false, optional = true } rand = { version = "0.8.5", default-features = false, features = ["alloc", "small_rng"], optional = true } -strum = { version = "0.24.1", default-features = false, features = ["derive"], optional = true } +strum = { version = "0.26.2", default-features = false, features = ["derive"], optional = true } [dev-dependencies] parking_lot = "0.12.1" diff --git a/substrate/frame/election-provider-multi-phase/src/helpers.rs b/substrate/frame/election-provider-multi-phase/src/helpers.rs index a346d4f917fe..e7035e40df8b 100644 --- a/substrate/frame/election-provider-multi-phase/src/helpers.rs +++ b/substrate/frame/election-provider-multi-phase/src/helpers.rs @@ -162,7 +162,7 @@ pub fn target_index_fn_linear<T: MinerConfig>( } /// Create a function that can map a voter index ([`SolutionVoterIndexOf`]) to the actual voter -/// account using a linearly indexible snapshot. +/// account using a linearly indexable snapshot.
pub fn voter_at_fn<T: MinerConfig>( snapshot: &Vec<VoterOf<T>>, ) -> impl Fn(SolutionVoterIndexOf<T>) -> Option<T::AccountId> + '_ { @@ -174,7 +174,7 @@ pub fn voter_at_fn<T: MinerConfig>( } /// Create a function that can map a target index ([`SolutionTargetIndexOf`]) to the actual target -/// account using a linearly indexible snapshot. +/// account using a linearly indexable snapshot. pub fn target_at_fn<T: MinerConfig>( snapshot: &Vec<T::AccountId>, ) -> impl Fn(SolutionTargetIndexOf<T>) -> Option<T::AccountId> + '_ { diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 6f7d30e59334..10d2dc9069e0 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -588,10 +588,8 @@ pub mod pallet { type EstimateCallFee: EstimateCallFee<Call<Self>, BalanceOf<Self>>; /// Duration of the unsigned phase. - #[pallet::constant] type UnsignedPhase: Get<BlockNumberFor<Self>>; /// Duration of the signed phase. - #[pallet::constant] type SignedPhase: Get<BlockNumberFor<Self>>; /// The minimum amount of improvement to the solution score that defines a solution as @@ -1136,7 +1134,7 @@ pub mod pallet { /// A solution was stored with the given compute. /// /// The `origin` indicates the origin of the solution. If `origin` is `Some(AccountId)`, - /// the stored solution was submited in the signed phase by a miner with the `AccountId`. + /// the stored solution was submitted in the signed phase by a miner with the `AccountId`. /// Otherwise, the solution was stored either during the unsigned phase or by /// `T::ForceOrigin`. The `bool` is `true` when a previous solution was ejected to make /// room for this one. @@ -1194,7 +1192,7 @@ pub mod pallet { BoundNotMet, /// Submitted solution has too many winners TooManyWinners, - /// Sumission was prepared for a different round. + /// Submission was prepared for a different round. PreDispatchDifferentRound, } diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index c17dfdde04af..497a8b00f07f 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -1446,7 +1446,7 @@ mod tests { ) .unwrap(); let solution = RawSolution { solution: raw, score, round: MultiPhase::round() }; - // 12 is not better than 12. We need score of atleast 13 to be accepted. + // 12 is not better than 12. We need score of at least 13 to be accepted. assert_eq!(solution.score.minimal_stake, 12); // submitting this will panic. assert_noop!( @@ -1484,7 +1484,7 @@ mod tests { )); // trial 4: a solution whose minimal stake is 17, i.e. 4 better than the last - // soluton. + // solution.
let result = ElectionResult { winners: vec![(10, 12)], assignments: vec![ diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml index ecb9c63459ea..d50bf2567998 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] parking_lot = "0.12.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -scale-info = { version = "2.10.0", features = ["derive"] } +scale-info = { version = "2.11.1", features = ["derive"] } log = { workspace = true } sp-runtime = { path = "../../../primitives/runtime" } diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index 2d6fc2c27741..9b942de45965 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -211,8 +211,8 @@ fn enters_emergency_phase_after_forcing_before_elect() { /// active validators. Thus, slashing a percentage of the current validators that is lower than /// `OffendingValidatorsThreshold` will never force a new era. However, as the slashes progress, if /// the subsequent elections do not meet the minimum election untrusted score, the election will -/// fail and enter in emenergency mode. -fn continous_slashes_below_offending_threshold() { +/// fail and enter in emergency mode. +fn continuous_slashes_below_offending_threshold() { let staking_builder = StakingExtBuilder::default().validator_count(10); let epm_builder = EpmExtBuilder::default().disable_emergency_throttling(); @@ -326,7 +326,7 @@ fn set_validation_intention_after_chilled() { } #[test] -/// Active ledger balance may fall below ED if account chills before unbounding. +/// Active ledger balance may fall below ED if account chills before unbonding. /// /// Unbonding call fails if the remaining ledger's stash balance falls below the existential /// deposit. However, if the stash is chilled before unbonding, the ledger's active balance may @@ -353,7 +353,7 @@ fn ledger_consistency_active_balance_below_ed() { // however, chilling works as expected. assert_ok!(Staking::chill(RuntimeOrigin::signed(11))); - // now unbonding the full active balance works, since remainer of the active balance is + // now unbonding the full active balance works, since remainder of the active balance is // not enforced to be below `MinNominatorBond` if the stash has been chilled. assert_ok!(Staking::unbond(RuntimeOrigin::signed(11), 1000)); @@ -482,7 +482,7 @@ fn automatic_unbonding_pools() { staking_events(), [ // auto-withdraw happened as expected to release 2's unbonding funds, but the funds - // were not transfered to 2 and stay in the pool's tranferrable balance instead. + // were not transferred to 2 and stay in the pool's transferrable balance instead. 
pallet_staking::Event::Withdrawn { stash: 7939698191839293293, amount: 10 }, pallet_staking::Event::Unbonded { stash: 7939698191839293293, amount: 10 } ] diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 67f0480a7c58..5d1d4221e6c2 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -166,7 +166,7 @@ parameter_types! { pub static SignedPhase: BlockNumber = 10; pub static UnsignedPhase: BlockNumber = 10; // we expect a minimum of 3 blocks in signed phase and unsigned phases before trying - // enetering in emergency phase after the election failed. + // entering in emergency phase after the election failed. pub static MinBlocksBeforeEmergency: BlockNumber = 3; pub static MaxActiveValidators: u32 = 1000; pub static OffchainRepeat: u32 = 5; @@ -660,7 +660,7 @@ pub fn roll_to(n: BlockNumber, delay_solution: bool) { Session::on_initialize(b); Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP); - // TODO(gpestana): implement a realistic OCW worker insted of simulating it + // TODO(gpestana): implement a realistic OCW worker instead of simulating it // https://github.com/paritytech/substrate/issues/13589 // if there's no solution queued and the solution should not be delayed, try mining and // queue a solution. diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml index a137a2d31e39..44970f5704ba 100644 --- a/substrate/frame/election-provider-support/Cargo.toml +++ b/substrate/frame/election-provider-support/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-election-provider-solution-type = { path = "solution-type" } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index 1bf1165229a7..09c6a492dd0d 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -25,7 +25,7 @@ proc-macro-crate = "3.0.0" [dev-dependencies] parity-scale-codec = "3.6.1" -scale-info = "2.10.0" +scale-info = "2.11.1" sp-arithmetic = { path = "../../../primitives/arithmetic" } # used by generate_solution_type: frame-election-provider-support = { path = ".." 
} diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml index a27f0f7d4dd4..1fb9e2387ed8 100644 --- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -21,7 +21,7 @@ honggfuzz = "0.5" rand = { version = "0.8", features = ["small_rng", "std"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-election-provider-solution-type = { path = ".." } frame-election-provider-support = { path = "../.." } sp-arithmetic = { path = "../../../../primitives/arithmetic" } diff --git a/substrate/frame/election-provider-support/src/bounds.rs b/substrate/frame/election-provider-support/src/bounds.rs index b9ae21e49ca7..6b2423b7fece 100644 --- a/substrate/frame/election-provider-support/src/bounds.rs +++ b/substrate/frame/election-provider-support/src/bounds.rs @@ -62,7 +62,7 @@ use sp_runtime::traits::Zero; /// Encapsulates the counting of things that can be bounded in an election, such as voters, /// targets or anything else. /// -/// This struct is defined mostly to prevent callers from mistankingly using `CountBound` instead of +/// This struct is defined mostly to prevent callers from mistakenly using `CountBound` instead of /// `SizeBound` and vice-versa. #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct CountBound(pub u32); @@ -96,7 +96,7 @@ impl Zero for CountBound { /// logic and implementation, but it most likely will represent bytes in SCALE encoding in this /// context. /// -/// This struct is defined mostly to prevent callers from mistankingly using `CountBound` instead of +/// This struct is defined mostly to prevent callers from mistakenly using `CountBound` instead of /// `SizeBound` and vice-versa. 
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct SizeBound(pub u32); diff --git a/substrate/frame/elections-phragmen/Cargo.toml b/substrate/frame/elections-phragmen/Cargo.toml index 907008db843b..1370ff36a371 100644 --- a/substrate/frame/elections-phragmen/Cargo.toml +++ b/substrate/frame/elections-phragmen/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/examples/basic/Cargo.toml b/substrate/frame/examples/basic/Cargo.toml index a5f484c0b3a3..60f5914bafd5 100644 --- a/substrate/frame/examples/basic/Cargo.toml +++ b/substrate/frame/examples/basic/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } diff --git a/substrate/frame/examples/default-config/Cargo.toml b/substrate/frame/examples/default-config/Cargo.toml index d412f4019847..843d0b29f97b 100644 --- a/substrate/frame/examples/default-config/Cargo.toml +++ b/substrate/frame/examples/default-config/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } diff --git a/substrate/frame/examples/dev-mode/Cargo.toml b/substrate/frame/examples/dev-mode/Cargo.toml index 8f6fa72b7ad9..b01f2628da80 100644 --- a/substrate/frame/examples/dev-mode/Cargo.toml +++ b/substrate/frame/examples/dev-mode/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } pallet-balances = { path = "../../balances", default-features = false } diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml index 93a46ba7b249..3a0e4f720f95 100644 --- a/substrate/frame/examples/frame-crate/Cargo.toml +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -17,9 
+17,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame = { path = "../..", default-features = false, features = ["experimental", "runtime"] } +frame = { package = "polkadot-sdk-frame", path = "../..", default-features = false, features = ["experimental", "runtime"] } [features] diff --git a/substrate/frame/examples/kitchensink/Cargo.toml b/substrate/frame/examples/kitchensink/Cargo.toml index eb70bd8d67dd..69f2c4923fa7 100644 --- a/substrate/frame/examples/kitchensink/Cargo.toml +++ b/substrate/frame/examples/kitchensink/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false, features = ["experimental"] } frame-system = { path = "../../system", default-features = false } diff --git a/substrate/frame/examples/offchain-worker/Cargo.toml b/substrate/frame/examples/offchain-worker/Cargo.toml index 508ac1ea1868..459ecdf935d6 100644 --- a/substrate/frame/examples/offchain-worker/Cargo.toml +++ b/substrate/frame/examples/offchain-worker/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } lite-json = { version = "0.2.0", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } sp-core = { path = "../../../primitives/core", default-features = false } diff --git a/substrate/frame/examples/offchain-worker/src/lib.rs b/substrate/frame/examples/offchain-worker/src/lib.rs index 526b364ed9d8..c7c6ec38879b 100644 --- a/substrate/frame/examples/offchain-worker/src/lib.rs +++ b/substrate/frame/examples/offchain-worker/src/lib.rs @@ -498,7 +498,7 @@ impl Pallet { // Here we showcase two ways to send an unsigned transaction / unsigned payload (raw) // // By default unsigned transactions are disallowed, so we need to whitelist this case - // by writing `UnsignedValidator`. Note that it's EXTREMELY important to carefuly + // by writing `UnsignedValidator`. Note that it's EXTREMELY important to carefully // implement unsigned validation logic, as any mistakes can lead to opening DoS or spam // attack vectors. See validation logic docs for more details. 
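The offchain-worker comment above stresses that unsigned transactions must be explicitly whitelisted via careful validation logic. A hedged, condensed sketch of the shape such an implementation takes (it assumes the pallet context and a `Call::submit_price_unsigned` variant like this example pallet's; it is not the crate's exact code):

```rust
// Condensed sketch of whitelisting a single unsigned call; assumes it sits
// inside a pallet module that defines `Config`, `Pallet<T>`, and `Call<T>`.
use sp_runtime::{
    traits::ValidateUnsigned,
    transaction_validity::{
        InvalidTransaction, TransactionSource, TransactionValidity, ValidTransaction,
    },
};

impl<T: Config> ValidateUnsigned for Pallet<T> {
    type Call = Call<T>;

    fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity {
        // Accept only the one whitelisted call; reject everything else.
        if let Call::submit_price_unsigned { block_number, .. } = call {
            ValidTransaction::with_tag_prefix("ExampleOffchainWorker")
                // `provides` tags deduplicate: one such transaction per block number.
                .and_provides(*block_number)
                // Expire the transaction if it is not included within a few blocks.
                .longevity(5)
                .propagate(true)
                .build()
        } else {
            InvalidTransaction::Call.into()
        }
    }
}
```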
// diff --git a/substrate/frame/examples/single-block-migrations/Cargo.toml b/substrate/frame/examples/single-block-migrations/Cargo.toml index 3ba72e88668b..ffd01cebfaba 100644 --- a/substrate/frame/examples/single-block-migrations/Cargo.toml +++ b/substrate/frame/examples/single-block-migrations/Cargo.toml @@ -13,10 +13,10 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -docify = { version = "0.2.3", default-features = false } +docify = "0.2.8" log = { version = "0.4.21", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false } frame-executive = { path = "../../executive", default-features = false } frame-system = { path = "../../system", default-features = false } diff --git a/substrate/frame/examples/single-block-migrations/src/lib.rs b/substrate/frame/examples/single-block-migrations/src/lib.rs index b811c2a50cb5..0aab9a6394df 100644 --- a/substrate/frame/examples/single-block-migrations/src/lib.rs +++ b/substrate/frame/examples/single-block-migrations/src/lib.rs @@ -20,7 +20,7 @@ //! An example pallet demonstrating best-practices for writing single-block migrations in the //! context of upgrading pallet storage. //! -//! ## Forwarning //! +//! ## Forewarning //! //! Single block migrations **MUST** execute in a single block, therefore when executed on a //! parachain are only appropriate when guaranteed to not exceed block weight limits. If a @@ -66,7 +66,7 @@ //! //! ## Adding a migration module //! -//! Writing a pallets migrations in a seperate module is strongly recommended. +//! Writing a pallet's migrations in a separate module is strongly recommended. //! //! Here's how the migration module is defined for this pallet: //! @@ -89,7 +89,7 @@ //! //! See the migration source code for detailed comments. //! -//! To keep the migration logic organised, it is split across additional modules: +//! Here's a brief overview of modules and types defined in `v1.rs`: //! //! ### `mod v0` //! @@ -98,28 +98,29 @@ //! //! This allows reading the old v0 value from storage during the migration. //! -//! ### `mod version_unchecked` +//! ### `InnerMigrateV0ToV1` //! //! Here we define our raw migration logic, -//! `version_unchecked::MigrateV0ToV1` which implements the [`OnRuntimeUpgrade`] trait. +//! `InnerMigrateV0ToV1` which implements the [`UncheckedOnRuntimeUpgrade`] trait. //! -//! Importantly, it is kept in a private module so that it cannot be accidentally used in a runtime. +//! #### Why [`UncheckedOnRuntimeUpgrade`]? //! -//! Private modules cannot be referenced in docs, so please read the code directly. +//! Otherwise, we would have two implementations of [`OnRuntimeUpgrade`] which could be confusing, +//! and may lead to accidentally using the wrong one. //! //! #### Standalone Struct or Pallet Hook? //! //! Note that the storage migration logic is attached to a standalone struct implementing -//! [`OnRuntimeUpgrade`], rather than implementing the +//! [`UncheckedOnRuntimeUpgrade`], rather than implementing the //! [`Hooks::on_runtime_upgrade`](frame_support::traits::Hooks::on_runtime_upgrade) hook directly on //! the pallet. The pallet hook is better suited for special types of logic that need to execute on
every runtime upgrade, but not so much for one-off storage migrations. //! -//! ### `pub mod versioned` +//! ### `MigrateV0ToV1` //! -//! Here, `version_unchecked::MigrateV0ToV1` is wrapped in a +//! Here, `InnerMigrateV0ToV1` is wrapped in a //! [`VersionedMigration`] to define -//! [`versioned::MigrateV0ToV1`](crate::migrations::v1::versioned::MigrateV0ToV1), which may be used +//! [`MigrateV0ToV1`](crate::migrations::v1::MigrateV0ToV1), which may be used //! in runtimes. //! //! Using [`VersionedMigration`] ensures that @@ -128,8 +129,6 @@ //! - Reads and writes from checking and setting the on-chain storage version are accounted for in //! the final [`Weight`](frame_support::weights::Weight) //! -//! This is the only public module exported from `v1`. -//! //! ### `mod test` //! //! Here basic unit tests are defined for the migration. @@ -142,7 +141,8 @@ //! [`VersionedMigration`]: frame_support::migrations::VersionedMigration //! [`GetStorageVersion`]: frame_support::traits::GetStorageVersion //! [`OnRuntimeUpgrade`]: frame_support::traits::OnRuntimeUpgrade -//! [`MigrateV0ToV1`]: crate::migrations::v1::versioned::MigrationV0ToV1 +//! [`UncheckedOnRuntimeUpgrade`]: frame_support::traits::UncheckedOnRuntimeUpgrade +//! [`MigrateV0ToV1`]: crate::migrations::v1::MigrateV0ToV1 // We make sure this pallet uses `no_std` for compiling to Wasm. #![cfg_attr(not(feature = "std"), no_std)] diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs index de6efb7d4503..f33685bf8d49 100644 --- a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs +++ b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs @@ -17,7 +17,7 @@ use frame_support::{ storage_alias, - traits::{Get, OnRuntimeUpgrade}, + traits::{Get, UncheckedOnRuntimeUpgrade}, }; #[cfg(feature = "try-runtime")] @@ -34,118 +34,95 @@ mod v0 { pub type Value = StorageValue, u32>; } -/// Private module containing *version unchecked* migration logic. +/// Implements [`UncheckedOnRuntimeUpgrade`], migrating the state of this pallet from V0 to V1. /// -/// Should only be used by the [`VersionedMigration`](frame_support::migrations::VersionedMigration) -/// type in this module to create something to export. +/// In V0 of the template [`crate::Value`] is just a `u32`. In V1, it has been upgraded to +/// contain the struct [`crate::CurrentAndPreviousValue`]. /// -/// The unversioned migration should be kept private so the unversioned migration cannot -/// accidentally be used in any runtimes. -/// -/// For more about this pattern of keeping items private, see -/// - -/// - -mod version_unchecked { - use super::*; +/// In this migration, update the on-chain storage for the pallet to reflect the new storage +/// layout. +pub struct InnerMigrateV0ToV1(sp_std::marker::PhantomData); + +impl UncheckedOnRuntimeUpgrade for InnerMigrateV0ToV1 { + /// Return the existing [`crate::Value`] so we can check that it was correctly set in + /// `InnerMigrateV0ToV1::post_upgrade`. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use codec::Encode; + + // Access the old value using the `storage_alias` type + let old_value = v0::Value::::get(); + // Return it as an encoded `Vec` + Ok(old_value.encode()) + } - /// Implements [`OnRuntimeUpgrade`], migrating the state of this pallet from V0 to V1. + /// Migrate the storage from V0 to V1. 
/// - /// In V0 of the template [`crate::Value`] is just a `u32`. In V1, it has been upgraded to - /// contain the struct [`crate::CurrentAndPreviousValue`]. + /// - If the value doesn't exist, there is nothing to do. + /// - If the value exists, it is read and then written back to storage inside a + /// [`crate::CurrentAndPreviousValue`]. + fn on_runtime_upgrade() -> frame_support::weights::Weight { + // Read the old value from storage + if let Some(old_value) = v0::Value::::take() { + // Write the new value to storage + let new = crate::CurrentAndPreviousValue { current: old_value, previous: None }; + crate::Value::::put(new); + // One read for the old value, one write for the new value + T::DbWeight::get().reads_writes(1, 1) + } else { + // One read for trying to access the old value + T::DbWeight::get().reads(1) + } + } + + /// Verifies the storage was migrated correctly. /// /// In this migration, update the on-chain storage for the pallet to reflect the new storage /// layout. pub struct MigrateV0ToV1(core::marker::PhantomData); - - impl OnRuntimeUpgrade for MigrateV0ToV1 { - /// Return the existing [`crate::Value`] so we can check that it was correctly set in - /// `version_unchecked::MigrateV0ToV1::post_upgrade`. - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - use codec::Encode; - - // Access the old value using the `storage_alias` type - let old_value = v0::Value::::get(); - // Return it as an encoded `Vec` - Ok(old_value.encode()) - } - - /// Migrate the storage from V0 to V1. - /// - /// - If the value doesn't exist, there is nothing to do. - /// - If the value exists, it is read and then written back to storage inside a - /// [`crate::CurrentAndPreviousValue`]. - fn on_runtime_upgrade() -> frame_support::weights::Weight { - // Read the old value from storage - if let Some(old_value) = v0::Value::::take() { - // Write the new value to storage - let new = crate::CurrentAndPreviousValue { current: old_value, previous: None }; - crate::Value::::put(new); - // One read for the old value, one write for the new value - T::DbWeight::get().reads_writes(1, 1) - } else { - // One read for trying to access the old value - T::DbWeight::get().reads(1) - } - } - - /// Verifies the storage was migrated correctly. - /// - /// - If there was no old value, the new value should not be set. - /// - If there was an old value, the new value should be a - /// [`crate::CurrentAndPreviousValue`]. - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { - use codec::Decode; - use frame_support::ensure; - - let maybe_old_value = Option::::decode(&mut &state[..]).map_err(|_| { - sp_runtime::TryRuntimeError::Other("Failed to decode old value from storage") - })?; - - match maybe_old_value { - Some(old_value) => { - let expected_new_value = - crate::CurrentAndPreviousValue { current: old_value, previous: None }; - let actual_new_value = crate::Value::::get(); - - ensure!(actual_new_value.is_some(), "New value not set"); - ensure!( - actual_new_value == Some(expected_new_value), - "New value not set correctly" - ); - }, - None => { - ensure!(crate::Value::::get().is_none(), "New value unexpectedly set"); - }, - }; - Ok(()) - } + /// - If there was no old value, the new value should not be set. + /// - If there was an old value, the new value should be a [`crate::CurrentAndPreviousValue`]. 
+ #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + use codec::Decode; + use frame_support::ensure; + + let maybe_old_value = Option::::decode(&mut &state[..]).map_err(|_| { + sp_runtime::TryRuntimeError::Other("Failed to decode old value from storage") + })?; + + match maybe_old_value { + Some(old_value) => { + let expected_new_value = + crate::CurrentAndPreviousValue { current: old_value, previous: None }; + let actual_new_value = crate::Value::::get(); + + ensure!(actual_new_value.is_some(), "New value not set"); + ensure!( + actual_new_value == Some(expected_new_value), + "New value not set correctly" + ); + }, + None => { + ensure!(crate::Value::::get().is_none(), "New value unexpectedly set"); + }, + }; + Ok(()) } } -/// Public module containing *version checked* migration logic. -/// -/// This is the only module that should be exported from this module. -/// -/// See [`VersionedMigration`](frame_support::migrations::VersionedMigration) docs for more about -/// how it works. -pub mod versioned { - use super::*; - - /// `version_unchecked::MigrateV0ToV1` wrapped in a - /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), which ensures that: - /// - The migration only runs once when the on-chain storage version is 0 - /// - The on-chain storage version is updated to `1` after the migration executes - /// - Reads/Writes from checking/settings the on-chain storage version are accounted for - pub type MigrateV0ToV1 = frame_support::migrations::VersionedMigration< - 0, // The migration will only execute when the on-chain storage version is 0 - 1, // The on-chain storage version will be set to 1 after the migration is complete - version_unchecked::MigrateV0ToV1, - crate::pallet::Pallet, - ::DbWeight, - >; -} +/// [`UncheckedOnRuntimeUpgrade`] implementation [`InnerMigrateV0ToV1`] wrapped in a +/// [`VersionedMigration`](frame_support::migrations::VersionedMigration), which ensures that: +/// - The migration only runs once when the on-chain storage version is 0 +/// - The on-chain storage version is updated to `1` after the migration executes +/// - Reads/Writes from checking/setting the on-chain storage version are accounted for +pub type MigrateV0ToV1 = frame_support::migrations::VersionedMigration< + 0, // The migration will only execute when the on-chain storage version is 0 + 1, // The on-chain storage version will be set to 1 after the migration is complete + InnerMigrateV0ToV1, + crate::pallet::Pallet, + ::DbWeight, +>; /// Tests for our migration. /// /// @@ -155,10 +132,10 @@ pub mod versioned { /// 3.
The storage is in the expected state after the migration #[cfg(any(all(feature = "try-runtime", test), doc))] mod test { + use self::InnerMigrateV0ToV1; use super::*; use crate::mock::{new_test_ext, MockRuntime}; use frame_support::assert_ok; - use version_unchecked::MigrateV0ToV1; #[test] fn handles_no_existing_value() { @@ -168,16 +145,16 @@ mod test { assert!(v0::Value::::get().is_none()); // Get the pre_upgrade bytes - let bytes = match MigrateV0ToV1::::pre_upgrade() { + let bytes = match InnerMigrateV0ToV1::::pre_upgrade() { Ok(bytes) => bytes, Err(e) => panic!("pre_upgrade failed: {:?}", e), }; // Execute the migration - let weight = MigrateV0ToV1::::on_runtime_upgrade(); + let weight = InnerMigrateV0ToV1::::on_runtime_upgrade(); // Verify post_upgrade succeeds - assert_ok!(MigrateV0ToV1::::post_upgrade(bytes)); + assert_ok!(InnerMigrateV0ToV1::::post_upgrade(bytes)); // The weight should be just 1 read for trying to access the old value. assert_eq!(weight, ::DbWeight::get().reads(1)); @@ -195,16 +172,16 @@ mod test { v0::Value::::put(initial_value); // Get the pre_upgrade bytes - let bytes = match MigrateV0ToV1::::pre_upgrade() { + let bytes = match InnerMigrateV0ToV1::::pre_upgrade() { Ok(bytes) => bytes, Err(e) => panic!("pre_upgrade failed: {:?}", e), }; // Execute the migration - let weight = MigrateV0ToV1::::on_runtime_upgrade(); + let weight = InnerMigrateV0ToV1::::on_runtime_upgrade(); // Verify post_upgrade succeeds - assert_ok!(MigrateV0ToV1::::post_upgrade(bytes)); + assert_ok!(InnerMigrateV0ToV1::::post_upgrade(bytes)); // The weight used should be 1 read for the old value, and 1 write for the new // value. diff --git a/substrate/frame/examples/split/Cargo.toml b/substrate/frame/examples/split/Cargo.toml index f28fda8f7252..43de083e9078 100644 --- a/substrate/frame/examples/split/Cargo.toml +++ b/substrate/frame/examples/split/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "MIT-0" homepage = "https://substrate.io" repository.workspace = true -description = "FRAME example splitted pallet" +description = "FRAME example split pallet" readme = "README.md" [lints] @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } diff --git a/substrate/frame/examples/tasks/Cargo.toml b/substrate/frame/examples/tasks/Cargo.toml index 291537fabd21..b7b261c2de3a 100644 --- a/substrate/frame/examples/tasks/Cargo.toml +++ b/substrate/frame/examples/tasks/Cargo.toml @@ -5,7 +5,7 @@ authors.workspace = true edition.workspace = true license.workspace = true repository.workspace = true -description = "Pallet to demonstrate the usage of Tasks to recongnize and execute service work" +description = "Pallet to demonstrate the usage of Tasks to recognize and execute service work" [lints] workspace = true @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } 
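Looping back to the `MigrateV0ToV1` alias introduced in the `v1.rs` hunks above: a hedged sketch of how a consuming runtime wires such a versioned migration into `frame_executive` (`Runtime`, `Block`, and `AllPalletsWithSystem` are the runtime's own definitions, and the crate import name is an assumption):

```rust
// Hedged sketch: wiring the versioned migration into a runtime's `Executive`.
// `Runtime`, `Block`, and `AllPalletsWithSystem` are assumed to already exist
// in the consuming runtime; only the `Migrations` tuple is the point here.
pub type Migrations = (
    pallet_example_single_block_migrations::migrations::v1::MigrateV0ToV1<Runtime>,
    // ... later migrations run after earlier ones, in tuple order ...
);

pub type Executive = frame_executive::Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllPalletsWithSystem,
    Migrations, // executed once, when the runtime version changes
>;
```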
frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml index a89f65aa9f72..fed5f6f1f1ec 100644 --- a/substrate/frame/executive/Cargo.toml +++ b/substrate/frame/executive/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } frame-try-runtime = { path = "../try-runtime", default-features = false, optional = true } diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml index 467db4cb9df4..8d365ed08450 100644 --- a/substrate/frame/fast-unstake/Cargo.toml +++ b/substrate/frame/fast-unstake/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } @@ -29,7 +29,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] pallet-staking-reward-curve = { path = "../staking/reward-curve" } diff --git a/substrate/frame/fast-unstake/src/tests.rs b/substrate/frame/fast-unstake/src/tests.rs index b19fe3b8c463..77128872f285 100644 --- a/substrate/frame/fast-unstake/src/tests.rs +++ b/substrate/frame/fast-unstake/src/tests.rs @@ -110,7 +110,7 @@ fn cannot_register_if_head() { stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![], }); - // Controller attempts to regsiter + // Controller attempts to register assert_noop!( FastUnstake::register_fast_unstake(RuntimeOrigin::signed(1)), Error::::AlreadyHead diff --git a/substrate/frame/glutton/Cargo.toml b/substrate/frame/glutton/Cargo.toml index 0ce870837624..5990477a80ab 100644 --- a/substrate/frame/glutton/Cargo.toml +++ b/substrate/frame/glutton/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] blake2 = { version = "0.10.4", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml index 9a38bdc649ba..bcc6d7646864 100644 --- a/substrate/frame/grandpa/Cargo.toml +++ b/substrate/frame/grandpa/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = 
"parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/grandpa/src/benchmarking.rs b/substrate/frame/grandpa/src/benchmarking.rs index 7a87f0c4b078..c89592b3b359 100644 --- a/substrate/frame/grandpa/src/benchmarking.rs +++ b/substrate/frame/grandpa/src/benchmarking.rs @@ -29,7 +29,7 @@ benchmarks! { // NOTE: generated with the test below `test_generate_equivocation_report_blob`. // the output should be deterministic since the keys we use are static. // with the current benchmark setup it is not possible to generate this - // programatically from the benchmark setup. + // programmatically from the benchmark setup. const EQUIVOCATION_PROOF_BLOB: [u8; 257] = [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 136, 220, 52, 23, 213, 5, 142, 196, 180, 80, 62, 12, 18, 234, 26, 10, 137, 190, 32, diff --git a/substrate/frame/grandpa/src/migrations/v5.rs b/substrate/frame/grandpa/src/migrations/v5.rs index e163c5fcb4b7..14fd215c1fc9 100644 --- a/substrate/frame/grandpa/src/migrations/v5.rs +++ b/substrate/frame/grandpa/src/migrations/v5.rs @@ -23,7 +23,7 @@ use core::marker::PhantomData; use frame_support::{ migrations::VersionedMigration, storage, - traits::{Get, OnRuntimeUpgrade}, + traits::{Get, UncheckedOnRuntimeUpgrade}, weights::Weight, }; use sp_consensus_grandpa::AuthorityList; @@ -38,9 +38,9 @@ fn load_authority_list() -> AuthorityList { } /// Actual implementation of [`MigrateV4ToV5`]. -pub struct MigrateImpl(PhantomData); +pub struct UncheckedMigrateImpl(PhantomData); -impl OnRuntimeUpgrade for MigrateImpl { +impl UncheckedOnRuntimeUpgrade for UncheckedMigrateImpl { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { use codec::Encode; @@ -94,5 +94,10 @@ impl OnRuntimeUpgrade for MigrateImpl { /// Migrate the storage from V4 to V5. /// /// Switches from `GRANDPA_AUTHORITIES_KEY` to a normal FRAME storage item. -pub type MigrateV4ToV5 = - VersionedMigration<4, 5, MigrateImpl, Pallet, ::DbWeight>; +pub type MigrateV4ToV5 = VersionedMigration< + 4, + 5, + UncheckedMigrateImpl, + Pallet, + ::DbWeight, +>; diff --git a/substrate/frame/grandpa/src/tests.rs b/substrate/frame/grandpa/src/tests.rs index 993d72af6d41..8b12d63adaad 100644 --- a/substrate/frame/grandpa/src/tests.rs +++ b/substrate/frame/grandpa/src/tests.rs @@ -634,7 +634,7 @@ fn report_equivocation_invalid_equivocation_proof() { (1, H256::zero(), 10, &equivocation_keyring), )); - // votes targetting different rounds, there is no equivocation. + // votes targeting different rounds, there is no equivocation. 
assert_invalid_equivocation_proof(generate_equivocation_proof( set_id, (1, H256::random(), 10, &equivocation_keyring), diff --git a/substrate/frame/identity/Cargo.toml b/substrate/frame/identity/Cargo.toml index b76d9078e31c..2e0901bcf847 100644 --- a/substrate/frame/identity/Cargo.toml +++ b/substrate/frame/identity/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } enumflags2 = { version = "0.7.7" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/identity/src/benchmarking.rs b/substrate/frame/identity/src/benchmarking.rs index c7252b9796f0..fa1c7de9d0f3 100644 --- a/substrate/frame/identity/src/benchmarking.rs +++ b/substrate/frame/identity/src/benchmarking.rs @@ -672,10 +672,10 @@ mod benchmarks { let username = bounded_username::(bench_username(), bench_suffix()); Identity::::queue_acceptance(&caller, username.clone()); - let expected_exiration = + let expected_expiration = frame_system::Pallet::::block_number() + T::PendingUsernameExpiration::get(); - run_to_block::(expected_exiration + One::one()); + run_to_block::(expected_expiration + One::one()); #[extrinsic_call] _(RawOrigin::Signed(caller.clone()), username); diff --git a/substrate/frame/identity/src/lib.rs b/substrate/frame/identity/src/lib.rs index ec9f2da95c4b..4e7dc4c80836 100644 --- a/substrate/frame/identity/src/lib.rs +++ b/substrate/frame/identity/src/lib.rs @@ -164,7 +164,7 @@ pub mod pallet { /// Structure holding information about an identity. type IdentityInformation: IdentityInformationProvider; - /// Maxmimum number of registrars allowed in the system. Needed to bound the complexity + /// Maximum number of registrars allowed in the system. Needed to bound the complexity /// of, e.g., updating judgements. #[pallet::constant] type MaxRegistrars: Get; diff --git a/substrate/frame/identity/src/migration.rs b/substrate/frame/identity/src/migration.rs index 88ac08d1bf56..8725bfd39df1 100644 --- a/substrate/frame/identity/src/migration.rs +++ b/substrate/frame/identity/src/migration.rs @@ -16,7 +16,9 @@ //! Storage migrations for the Identity pallet. use super::*; -use frame_support::{migrations::VersionedMigration, pallet_prelude::*, traits::OnRuntimeUpgrade}; +use frame_support::{ + migrations::VersionedMigration, pallet_prelude::*, traits::UncheckedOnRuntimeUpgrade, +}; #[cfg(feature = "try-runtime")] use codec::{Decode, Encode}; @@ -66,7 +68,7 @@ pub mod v1 { /// prevent stalling a parachain by accumulating too much weight in the migration. To have an /// unlimited migration (e.g. in a chain without PoV limits), set this to `u64::MAX`. 
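The identity migration doc above caps how many identities are processed so a parachain cannot stall on an over-weight upgrade. A hedged sketch of choosing that cap per chain, assuming the per-block key limit is a const generic parameter of the hunk's struct as the surrounding docs suggest (`Runtime` is a placeholder):

```rust
// Hedged sketch: choosing the identity migration's key limit per chain,
// assuming a `const KL: u64` parameter as the doc comment above describes.
// A parachain keeps the limit small so the migration fits in one block:
type ParachainIdentityMigration = VersionUncheckedMigrateV0ToV1<Runtime, 200>;
// A solo chain without PoV limits can migrate everything at once:
type UnlimitedIdentityMigration = VersionUncheckedMigrateV0ToV1<Runtime, { u64::MAX }>;
```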
pub struct VersionUncheckedMigrateV0ToV1(PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateV0ToV1 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV0ToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { let identities = v0::IdentityOf::::iter().count(); diff --git a/substrate/frame/im-online/Cargo.toml b/substrate/frame/im-online/Cargo.toml index 2172af22650f..450f7ec720a4 100644 --- a/substrate/frame/im-online/Cargo.toml +++ b/substrate/frame/im-online/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/im-online/src/tests.rs b/substrate/frame/im-online/src/tests.rs index 5e5212e1d562..f9959593494a 100644 --- a/substrate/frame/im-online/src/tests.rs +++ b/substrate/frame/im-online/src/tests.rs @@ -416,7 +416,7 @@ fn should_handle_non_linear_session_progress() { Session::rotate_session(); - // if we don't have valid results for the current session progres then + // if we don't have valid results for the current session progress then // we'll fallback to `HeartbeatAfter` and only heartbeat on block 5. MockCurrentSessionProgress::mutate(|p| *p = Some(None)); assert_eq!(ImOnline::send_heartbeats(2).err(), Some(OffchainErr::TooEarly)); diff --git a/substrate/frame/indices/Cargo.toml b/substrate/frame/indices/Cargo.toml index cc4d748be563..677769ec42f9 100644 --- a/substrate/frame/indices/Cargo.toml +++ b/substrate/frame/indices/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml index e3bf8f63bc7b..cdcf78debaf5 100644 --- a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml +++ b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } safe-mix = { version = "1.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } diff --git 
a/substrate/frame/lottery/Cargo.toml b/substrate/frame/lottery/Cargo.toml index b17ef84674c8..bcc3cedd3179 100644 --- a/substrate/frame/lottery/Cargo.toml +++ b/substrate/frame/lottery/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/membership/Cargo.toml b/substrate/frame/membership/Cargo.toml index d9c9368faa33..f2f05fbcbbd3 100644 --- a/substrate/frame/membership/Cargo.toml +++ b/substrate/frame/membership/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/membership/src/lib.rs b/substrate/frame/membership/src/lib.rs index 9fb31a1c7468..2e6f889f5626 100644 --- a/substrate/frame/membership/src/lib.rs +++ b/substrate/frame/membership/src/lib.rs @@ -371,6 +371,18 @@ impl, I: 'static> SortedMembers for Pallet { fn count() -> usize { Members::::decode_len().unwrap_or(0) } + + #[cfg(feature = "runtime-benchmarks")] + fn add(new_member: &T::AccountId) { + use frame_support::{assert_ok, traits::EnsureOrigin}; + let new_member_lookup = T::Lookup::unlookup(new_member.clone()); + + if let Ok(origin) = T::AddOrigin::try_successful_origin() { + assert_ok!(Pallet::::add_member(origin, new_member_lookup,)); + } else { + log::error!(target: LOG_TARGET, "Failed to add `{new_member:?}` in `SortedMembers::add`.") + } + } } #[cfg(feature = "runtime-benchmarks")] diff --git a/substrate/frame/merkle-mountain-range/Cargo.toml b/substrate/frame/merkle-mountain-range/Cargo.toml index 24797a916371..bf4623519c58 100644 --- a/substrate/frame/merkle-mountain-range/Cargo.toml +++ b/substrate/frame/merkle-mountain-range/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } @@ -28,7 +28,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false } [dev-dependencies] array-bytes = "6.1" -env_logger = "0.9" +env_logger = "0.11" itertools = "0.10.3" [features] diff --git a/substrate/frame/merkle-mountain-range/src/lib.rs 
b/substrate/frame/merkle-mountain-range/src/lib.rs index d42ab4f1b4bc..12121b687d29 100644 --- a/substrate/frame/merkle-mountain-range/src/lib.rs +++ b/substrate/frame/merkle-mountain-range/src/lib.rs @@ -91,7 +91,7 @@ mod tests; /// is not available (since the block is not finished yet), /// we use the `parent_hash` here along with parent block number. pub struct ParentNumberAndHash { - _phanthom: core::marker::PhantomData, + _phantom: core::marker::PhantomData, } impl LeafDataProvider for ParentNumberAndHash { @@ -185,7 +185,6 @@ pub mod pallet { /// Latest MMR Root hash. #[pallet::storage] - #[pallet::getter(fn mmr_root_hash)] pub type RootHash, I: 'static = ()> = StorageValue<_, HashOf, ValueQuery>; /// Current size of the MMR (number of leaves). @@ -206,7 +205,7 @@ pub mod pallet { impl, I: 'static> Hooks> for Pallet { fn on_initialize(_n: BlockNumberFor) -> Weight { use primitives::LeafDataProvider; - let leaves = Self::mmr_leaves(); + let leaves = NumberOfLeaves::::get(); let peaks_before = sp_mmr_primitives::utils::NodesUtils::new(leaves).number_of_peaks(); let data = T::LeafData::leaf_data(); @@ -227,8 +226,8 @@ pub mod pallet { }; >::on_new_root(&root); - >::put(leaves); - >::put(root); + NumberOfLeaves::::put(leaves); + RootHash::::put(root); let peaks_after = sp_mmr_primitives::utils::NodesUtils::new(leaves).number_of_peaks(); @@ -303,7 +302,7 @@ impl, I: 'static> Pallet { { let first_mmr_block = utils::first_mmr_block_num::>( >::block_number(), - Self::mmr_leaves(), + NumberOfLeaves::::get(), )?; utils::block_num_to_leaf_index::>(block_num, first_mmr_block) @@ -343,7 +342,7 @@ impl, I: 'static> Pallet { /// Return the on-chain MMR root hash. pub fn mmr_root() -> HashOf { - Self::mmr_root_hash() + RootHash::::get() } /// Verify MMR proof for given `leaves`. @@ -356,7 +355,7 @@ impl, I: 'static> Pallet { leaves: Vec>, proof: primitives::Proof>, ) -> Result<(), primitives::Error> { - if proof.leaf_count > Self::mmr_leaves() || + if proof.leaf_count > NumberOfLeaves::::get() || proof.leaf_count == 0 || (proof.items.len().saturating_add(leaves.len())) as u64 > proof.leaf_count { diff --git a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs index bf0fce9d79a9..d24db1d95d87 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs @@ -111,7 +111,7 @@ where L: primitives::FullLeaf, { fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result>> { - Ok(>::get(pos).map(Node::Hash)) + Ok(Nodes::::get(pos).map(Node::Hash)) } fn append(&mut self, pos: NodeIndex, elems: Vec>) -> mmr_lib::Result<()> { @@ -147,7 +147,7 @@ where for elem in elems { // On-chain we are going to only store new peaks. if peaks_to_store.next_if_eq(&node_index).is_some() { - >::insert(node_index, elem.hash()); + Nodes::::insert(node_index, elem.hash()); } // We are storing full node off-chain (using indexing API). Self::store_to_offchain(node_index, parent_hash, &elem); @@ -164,7 +164,7 @@ where // And remove all remaining items from `peaks_before` collection. 
for pos in peaks_to_prune { - >::remove(pos); + Nodes::::remove(pos); } Ok(()) diff --git a/substrate/frame/merkle-mountain-range/src/tests.rs b/substrate/frame/merkle-mountain-range/src/tests.rs index 429df75182ee..88de7511c9f2 100644 --- a/substrate/frame/merkle-mountain-range/src/tests.rs +++ b/substrate/frame/merkle-mountain-range/src/tests.rs @@ -608,9 +608,9 @@ fn verification_should_be_stateless() { let mut ext = new_test_ext(); let (root_6, root_7) = ext.execute_with(|| { add_blocks(6); - let root_6 = crate::Pallet::::mmr_root_hash(); + let root_6 = crate::Pallet::::mmr_root(); add_blocks(1); - let root_7 = crate::Pallet::::mmr_root_hash(); + let root_7 = crate::Pallet::::mmr_root(); (root_6, root_7) }); ext.persist_offchain_overlay(); @@ -656,9 +656,9 @@ fn should_verify_batch_proof_statelessly() { let mut ext = new_test_ext(); let (root_6, root_7) = ext.execute_with(|| { add_blocks(6); - let root_6 = crate::Pallet::::mmr_root_hash(); + let root_6 = crate::Pallet::::mmr_root(); add_blocks(1); - let root_7 = crate::Pallet::::mmr_root_hash(); + let root_7 = crate::Pallet::::mmr_root(); (root_6, root_7) }); ext.persist_offchain_overlay(); diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml index af66e1aec560..f8f50a1a3b53 100644 --- a/substrate/frame/message-queue/Cargo.toml +++ b/substrate/frame/message-queue/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } log = { workspace = true } environmental = { version = "1.1.4", default-features = false } diff --git a/substrate/frame/message-queue/src/integration_test.rs b/substrate/frame/message-queue/src/integration_test.rs index 26a330cc88e8..14b8d2217eb2 100644 --- a/substrate/frame/message-queue/src/integration_test.rs +++ b/substrate/frame/message-queue/src/integration_test.rs @@ -73,6 +73,7 @@ impl Config for Test { type HeapSize = HeapSize; type MaxStale = MaxStale; type ServiceWeight = ServiceWeight; + type IdleMaxServiceWeight = (); } /// Simulates heavy usage by enqueueing and processing large amounts of messages. diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index b59b759ca4f3..a3d3fcc003ed 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -527,12 +527,21 @@ pub mod pallet { type MaxStale: Get; /// The amount of weight (if any) which should be provided to the message queue for - /// servicing enqueued items. + /// servicing enqueued items `on_initialize`. /// /// This may be legitimately `None` in the case that you will call - /// `ServiceQueues::service_queues` manually. + /// `ServiceQueues::service_queues` manually or set [`Self::IdleMaxServiceWeight`] to have + /// it run in `on_idle`. #[pallet::constant] type ServiceWeight: Get>; + + /// The maximum amount of weight (if any) to be used from remaining weight `on_idle` which + /// should be provided to the message queue for servicing enqueued items `on_idle`. + /// Useful for parachains to process messages at the same block they are received. + /// + /// If `None`, it will not call `ServiceQueues::service_queues` in `on_idle`. 
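The new `IdleMaxServiceWeight` constant documented just below is what a runtime sets to opt into on-idle servicing. A hedged sketch of such a configuration (the weight values are illustrative placeholders, and every other `Config` item is elided):

```rust
// Hedged sketch: opting a runtime into `on_idle` servicing of the message
// queue. Weight values are placeholders; other `Config` items are elided.
use frame_support::{parameter_types, weights::Weight};

parameter_types! {
    pub const MqServiceWeight: Option<Weight> =
        Some(Weight::from_parts(500_000_000, 32 * 1024));
    // `None` here would keep the old behaviour of doing nothing on idle.
    pub const MqIdleServiceWeight: Option<Weight> =
        Some(Weight::from_parts(1_000_000_000, 64 * 1024));
}

impl pallet_message_queue::Config for Runtime {
    // ... other associated types as before ...
    type ServiceWeight = MqServiceWeight;
    // Lets messages enqueued earlier in a block be processed in the same
    // block's leftover weight.
    type IdleMaxServiceWeight = MqIdleServiceWeight;
}
```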
+ #[pallet::constant] + type IdleMaxServiceWeight: Get>; } #[pallet::event] @@ -645,6 +654,15 @@ pub mod pallet { } } + fn on_idle(_n: BlockNumberFor, remaining_weight: Weight) -> Weight { + if let Some(weight_limit) = T::IdleMaxServiceWeight::get() { + // Make use of the remaining weight to process enqueued messages. + Self::service_queues(weight_limit.min(remaining_weight)) + } else { + Weight::zero() + } + } + #[cfg(feature = "try-runtime")] fn try_state(_: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { Self::do_try_state() @@ -1462,7 +1480,7 @@ impl Pallet { /// Run a closure that errors on re-entrance. Meant to be used by anything that services queues. pub(crate) fn with_service_mutex R, R>(f: F) -> Result { - // Holds the singelton token instance. + // Holds the singleton token instance. environmental::environmental!(token: Option<()>); token::using_once(&mut Some(()), || { diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index 61648847d44d..ddaedca88c26 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -56,6 +56,7 @@ impl Config for Test { type HeapSize = HeapSize; type MaxStale = MaxStale; type ServiceWeight = ServiceWeight; + type IdleMaxServiceWeight = ServiceWeight; } /// Mocked `WeightInfo` impl with allows to set the weight per call. diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index dbef94b3b103..d6788847d571 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -90,7 +90,7 @@ fn queue_priority_retains() { MessageQueue::enqueue_message(msg("d"), Everywhere(2)); assert_ring(&[Everywhere(1), Everywhere(2), Everywhere(3)]); // service head is 1, it will process a, leaving service head at 2. it also processes b but - // doees not empty queue 2, so service head will end at 2. + // does not empty queue 2, so service head will end at 2. assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight()); assert_eq!( MessagesProcessed::take(), @@ -1838,3 +1838,45 @@ fn with_service_mutex_works() { with_service_mutex(|| called = 3).unwrap(); assert_eq!(called, 3); } + +#[test] +fn process_enqueued_on_idle() { + use MessageOrigin::*; + build_and_execute::(|| { + // Some messages enqueued on previous block. + MessageQueue::enqueue_messages(vec![msg("a"), msg("ab"), msg("abc")].into_iter(), Here); + assert_eq!(BookStateFor::::iter().count(), 1); + + // Process enqueued messages from previous block. + Pallet::::on_initialize(1); + assert_eq!( + MessagesProcessed::take(), + vec![(b"a".to_vec(), Here), (b"ab".to_vec(), Here), (b"abc".to_vec(), Here),] + ); + + MessageQueue::enqueue_messages(vec![msg("x"), msg("xy"), msg("xyz")].into_iter(), There); + assert_eq!(BookStateFor::::iter().count(), 2); + + // Enough weight to process on idle. + Pallet::::on_idle(1, Weight::from_parts(100, 100)); + assert_eq!( + MessagesProcessed::take(), + vec![(b"x".to_vec(), There), (b"xy".to_vec(), There), (b"xyz".to_vec(), There)] + ); + }) +} + +#[test] +fn process_enqueued_on_idle_requires_enough_weight() { + use MessageOrigin::*; + build_and_execute::(|| { + Pallet::::on_initialize(1); + + MessageQueue::enqueue_messages(vec![msg("x"), msg("xy"), msg("xyz")].into_iter(), There); + assert_eq!(BookStateFor::::iter().count(), 1); + + // Not enough weight to process on idle. 
+ Pallet::::on_idle(1, Weight::from_parts(0, 0)); + assert_eq!(MessagesProcessed::take(), vec![]); + }) +} diff --git a/substrate/frame/migrations/Cargo.toml b/substrate/frame/migrations/Cargo.toml index c6829d25b195..16976a206640 100644 --- a/substrate/frame/migrations/Cargo.toml +++ b/substrate/frame/migrations/Cargo.toml @@ -12,10 +12,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -docify = "0.1.14" +docify = "0.2.8" impl-trait-for-tuples = "0.2.2" log = "0.4.21" -scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" } frame-support = { default-features = false, path = "../support" } diff --git a/substrate/frame/migrations/src/lib.rs b/substrate/frame/migrations/src/lib.rs index 6f88ddd706e1..701bf0a44c7a 100644 --- a/substrate/frame/migrations/src/lib.rs +++ b/substrate/frame/migrations/src/lib.rs @@ -35,7 +35,7 @@ //! succeeding after two steps. A runtime upgrade is then enacted and the block number is advanced //! until all migrations finish executing. Afterwards, the recorded historic migrations are //! checked and events are asserted. -#![doc = docify::embed!("substrate/frame/migrations/src/tests.rs", simple_works)] +#![doc = docify::embed!("src/tests.rs", simple_works)] //! //! ## Pallet API //! diff --git a/substrate/frame/migrations/src/mock_helpers.rs b/substrate/frame/migrations/src/mock_helpers.rs index 1793ff80392e..2c52045be9e0 100644 --- a/substrate/frame/migrations/src/mock_helpers.rs +++ b/substrate/frame/migrations/src/mock_helpers.rs @@ -108,7 +108,7 @@ impl SteppedMigrations for MockedMigrations { cursor: Option>, meter: &mut WeightMeter, ) -> Option>, SteppedMigrationError>> { - // This is a hack but should be fine. We dont need it in testing. + // This is a hack but should be fine. We don't need it in testing. 
Self::nth_step(n, cursor, meter) } diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index 921dccf558df..aa613928d9a6 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -21,7 +21,7 @@ frame-benchmarking = { default-features = false, optional = true, path = "../ben frame-support = { default-features = false, path = "../support" } frame-system = { default-features = false, path = "../system" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], workspace = true } sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto" } sp-arithmetic = { default-features = false, path = "../../primitives/arithmetic" } diff --git a/substrate/frame/multisig/Cargo.toml b/substrate/frame/multisig/Cargo.toml index 4ef07aace703..dc7d1f566d75 100644 --- a/substrate/frame/multisig/Cargo.toml +++ b/substrate/frame/multisig/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/nft-fractionalization/Cargo.toml b/substrate/frame/nft-fractionalization/Cargo.toml index 37d35041a69c..59f650f3338b 100644 --- a/substrate/frame/nft-fractionalization/Cargo.toml +++ b/substrate/frame/nft-fractionalization/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/nfts/Cargo.toml b/substrate/frame/nfts/Cargo.toml index a37edde2b636..359d690d06c4 100644 --- a/substrate/frame/nfts/Cargo.toml +++ b/substrate/frame/nfts/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } enumflags2 = { version = "0.7.7" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/nis/Cargo.toml b/substrate/frame/nis/Cargo.toml index 9b5bd8c3e73d..460281013a4d 100644 --- a/substrate/frame/nis/Cargo.toml +++ b/substrate/frame/nis/Cargo.toml @@ -17,7 +17,7 @@ targets = 
["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/nis/README.md b/substrate/frame/nis/README.md index 032df7d01868..8a1a30f17e18 100644 --- a/substrate/frame/nis/README.md +++ b/substrate/frame/nis/README.md @@ -1,5 +1,5 @@ # NIS Module -Provides a non-interactiove variant of staking. +Provides a non-interactive variant of staking. License: Apache-2.0 diff --git a/substrate/frame/nis/src/lib.rs b/substrate/frame/nis/src/lib.rs index 6fd84337737f..8e00d1b3c8c1 100644 --- a/substrate/frame/nis/src/lib.rs +++ b/substrate/frame/nis/src/lib.rs @@ -427,7 +427,7 @@ pub mod pallet { }, /// An automatic funding of the deficit was made. Funded { deficit: BalanceOf }, - /// A receipt was transfered. + /// A receipt was transferred. Transferred { from: T::AccountId, to: T::AccountId, index: ReceiptIndex }, } @@ -458,7 +458,7 @@ pub mod pallet { AlreadyFunded, /// The thaw throttle has been reached for this period. Throttled, - /// The operation would result in a receipt worth an insignficant value. + /// The operation would result in a receipt worth an insignificant value. MakesDust, /// The receipt is already communal. AlreadyCommunal, diff --git a/substrate/frame/nis/src/tests.rs b/substrate/frame/nis/src/tests.rs index 7350da97dc60..01724999ae7e 100644 --- a/substrate/frame/nis/src/tests.rs +++ b/substrate/frame/nis/src/tests.rs @@ -414,7 +414,7 @@ fn thaw_respects_transfers() { assert_eq!(Balances::reserved_balance(&1), 0); assert_eq!(Balances::reserved_balance(&2), 40); - // Transfering the receipt... + // Transferring the receipt... assert_noop!(Nis::thaw_private(signed(1), 0, None), Error::::NotOwner); // ...and thawing is possible. diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml index 2671d35f2881..60597b4d4c0b 100644 --- a/substrate/frame/node-authorization/Cargo.toml +++ b/substrate/frame/node-authorization/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } diff --git a/substrate/frame/node-authorization/src/lib.rs b/substrate/frame/node-authorization/src/lib.rs index d3e50ff9a3e5..f2041e08cfe3 100644 --- a/substrate/frame/node-authorization/src/lib.rs +++ b/substrate/frame/node-authorization/src/lib.rs @@ -18,7 +18,7 @@ //! # Node authorization pallet //! //! This pallet manages a configurable set of nodes for a permissioned network. -//! Each node is dentified by a PeerId (i.e. `Vec`). It provides two ways to +//! Each node is identified by a PeerId (i.e. `Vec`). 
It provides two ways to //! authorize a node, //! //! - a set of well known nodes across different organizations in which the @@ -104,7 +104,7 @@ pub mod pallet { #[pallet::getter(fn owners)] pub type Owners = StorageMap<_, Blake2_128Concat, PeerId, T::AccountId>; - /// The additional adapative connections of each node. + /// The additional adaptive connections of each node. #[pallet::storage] #[pallet::getter(fn additional_connection)] pub type AdditionalConnections = @@ -163,7 +163,7 @@ pub mod pallet { NotClaimed, /// You are not the owner of the node. NotOwner, - /// No permisson to perform specific operation. + /// No permission to perform specific operation. PermissionDenied, } @@ -379,7 +379,7 @@ pub mod pallet { /// Add additional connections to a given node. /// /// - `node`: identifier of the node. - /// - `connections`: additonal nodes from which the connections are allowed. + /// - `connections`: additional nodes from which the connections are allowed. #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::add_connections())] pub fn add_connections( @@ -414,7 +414,7 @@ pub mod pallet { /// Remove additional connections of a given node. /// /// - `node`: identifier of the node. - /// - `connections`: additonal nodes from which the connections are not allowed anymore. + /// - `connections`: additional nodes from which the connections are not allowed anymore. #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::remove_connections())] pub fn remove_connections( diff --git a/substrate/frame/nomination-pools/Cargo.toml b/substrate/frame/nomination-pools/Cargo.toml index 87075be07609..302682d959d6 100644 --- a/substrate/frame/nomination-pools/Cargo.toml +++ b/substrate/frame/nomination-pools/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } diff --git a/substrate/frame/nomination-pools/benchmarking/Cargo.toml b/substrate/frame/nomination-pools/benchmarking/Cargo.toml index 7cc081730365..9e4570ebd2ff 100644 --- a/substrate/frame/nomination-pools/benchmarking/Cargo.toml +++ b/substrate/frame/nomination-pools/benchmarking/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # FRAME frame-benchmarking = { path = "../../benchmarking", default-features = false } diff --git a/substrate/frame/nomination-pools/benchmarking/src/lib.rs b/substrate/frame/nomination-pools/benchmarking/src/lib.rs index 14ce31e0411b..d9e289d18696 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/lib.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/lib.rs @@ -798,9 +798,9 @@ frame_benchmarking::benchmarks! 
{ T::Staking::active_stake(&pool_account).unwrap(), min_create_bond + min_join_bond ); - }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::PermissionlessAll) + }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::Permissioned) verify { - assert_eq!(ClaimPermissions::<T>::get(joiner), ClaimPermission::PermissionlessAll); + assert_eq!(ClaimPermissions::<T>::get(joiner), ClaimPermission::Permissioned); } claim_commission { diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index e8e44cd6c6b6..dbf6e8c84bfe 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -454,7 +454,7 @@ enum AccountType { /// The permission a pool member can set for other accounts to claim rewards on their behalf. #[derive(Encode, Decode, MaxEncodedLen, Clone, Copy, Debug, PartialEq, Eq, TypeInfo)] pub enum ClaimPermission { - /// Only the pool member themself can claim their rewards. + /// Only the pool member themselves can claim their rewards. Permissioned, /// Anyone can compound rewards on a pool member's behalf. PermissionlessCompound, @@ -464,22 +464,26 @@ pub enum ClaimPermission { PermissionlessAll, } +impl Default for ClaimPermission { + fn default() -> Self { + Self::PermissionlessWithdraw + } +} + impl ClaimPermission { + /// Permissionless compounding of pool rewards is allowed if the current permission is + /// `PermissionlessCompound` or `PermissionlessAll`. fn can_bond_extra(&self) -> bool { matches!(self, ClaimPermission::PermissionlessAll | ClaimPermission::PermissionlessCompound) } + /// Permissionless payout claiming is allowed if the current permission is + /// `PermissionlessWithdraw` or `PermissionlessAll`. fn can_claim_payout(&self) -> bool { matches!(self, ClaimPermission::PermissionlessAll | ClaimPermission::PermissionlessWithdraw) } } -impl Default for ClaimPermission { - fn default() -> Self { - Self::Permissioned - } -} - /// A member in a pool. #[derive( Encode, @@ -2069,7 +2073,7 @@ pub mod pallet { /// The member will earn rewards pro rata based on the members stake vs the sum of the /// members in the pools stake. Rewards do not "expire". /// - /// See `claim_payout_other` to caim rewards on bahalf of some `other` pool member. + /// See `claim_payout_other` to claim rewards on behalf of some `other` pool member. #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::claim_payout())] pub fn claim_payout(origin: OriginFor<T>) -> DispatchResult { @@ -2633,7 +2637,7 @@ pub mod pallet { /// /// In the case of `origin != other`, `origin` can only bond extra pending rewards of /// `other` members assuming set_claim_permission for the given member is - /// `PermissionlessAll` or `PermissionlessCompound`. + /// `PermissionlessCompound` or `PermissionlessAll`. #[pallet::call_index(14)] #[pallet::weight( T::WeightInfo::bond_extra_transfer() @@ -2651,15 +2655,10 @@ /// Allows a pool member to set a claim permission to allow or disallow permissionless /// bonding and withdrawing. /// - /// By default, this is `Permissioned`, which implies only the pool member themselves can - /// claim their pending rewards. If a pool member wishes so, they can set this to - /// `PermissionlessAll` to allow any account to claim their rewards and bond extra to the - /// pool. - /// /// # Arguments /// /// * `origin` - Member of a pool. - /// * `actor` - Account to claim reward. // improve this + /// * `permission` - The permission to be applied.
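The nomination-pools hunks above flip the default claim permission from `Permissioned` to `PermissionlessWithdraw` and document the two permissionless checks. A minimal, self-contained sketch of the resulting semantics (enum variants and `matches!` bodies mirror the hunks above; the variant docs are paraphrased and the `main` harness is illustrative only):

```rust
// Sketch of the pool-member claim-permission semantics after this change.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ClaimPermission {
    /// Only the pool member themselves can claim their rewards.
    Permissioned,
    /// Anyone may compound rewards on the member's behalf.
    PermissionlessCompound,
    /// Anyone may withdraw rewards on the member's behalf.
    PermissionlessWithdraw,
    /// Anyone may compound and withdraw on the member's behalf.
    PermissionlessAll,
}

impl Default for ClaimPermission {
    // Changed by this diff: the default used to be `Self::Permissioned`.
    fn default() -> Self {
        Self::PermissionlessWithdraw
    }
}

impl ClaimPermission {
    fn can_bond_extra(&self) -> bool {
        matches!(self, Self::PermissionlessAll | Self::PermissionlessCompound)
    }

    fn can_claim_payout(&self) -> bool {
        matches!(self, Self::PermissionlessAll | Self::PermissionlessWithdraw)
    }
}

fn main() {
    // A member who never calls `set_claim_permission` can now have payouts
    // claimed on their behalf, but nobody may compound for them.
    let default = ClaimPermission::default();
    assert!(default.can_claim_payout());
    assert!(!default.can_bond_extra());
}
```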
#[pallet::call_index(15)] #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] pub fn set_claim_permission( @@ -2669,16 +2668,18 @@ pub mod pallet { let who = ensure_signed(origin)?; ensure!(PoolMembers::::contains_key(&who), Error::::PoolMemberNotFound); + ClaimPermissions::::mutate(who, |source| { *source = permission; }); + Ok(()) } /// `origin` can claim payouts on some pool member `other`'s behalf. /// - /// Pool member `other` must have a `PermissionlessAll` or `PermissionlessWithdraw` in order - /// for this call to be successful. + /// Pool member `other` must have a `PermissionlessWithdraw` or `PermissionlessAll` claim + /// permission for this call to be successful. #[pallet::call_index(16)] #[pallet::weight(T::WeightInfo::claim_payout())] pub fn claim_payout_other(origin: OriginFor, other: T::AccountId) -> DispatchResult { @@ -2800,7 +2801,7 @@ pub mod pallet { /// Set or remove a pool's commission claim permission. /// /// Determines who can claim the pool's pending commission. Only the `Root` role of the pool - /// is able to conifigure commission claim permissions. + /// is able to configure commission claim permissions. #[pallet::call_index(22)] #[pallet::weight(T::WeightInfo::set_commission_claim_permission())] pub fn set_commission_claim_permission( @@ -2839,7 +2840,7 @@ pub mod pallet { assert!( T::Staking::bonding_duration() < TotalUnbondingPools::::get(), "There must be more unbonding pools then the bonding duration / - so a slash can be applied to relevant unboding pools. (We assume / + so a slash can be applied to relevant unbonding pools. (We assume / the bonding duration > slash deffer duration.", ); } diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index c7435796f1a5..661b784f8b78 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -17,8 +17,10 @@ use super::*; use crate::log; -use alloc::{collections::btree_map::BTreeMap, vec::Vec}; -use frame_support::traits::OnRuntimeUpgrade; +use frame_support::traits::{OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade}; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; +use alloc::collections::btree_map::BTreeMap; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; @@ -70,7 +72,7 @@ pub mod unversioned { fn on_runtime_upgrade() -> Weight { let migrated = BondedPools::::count(); - // recalcuate the `TotalValueLocked` to compare with the current on-chain TVL which may + // recalculate the `TotalValueLocked` to compare with the current on-chain TVL which may // be out of sync. 
let tvl: BalanceOf = helpers::calculate_tvl_by_total_stake::(); let onchain_tvl = TotalValueLocked::::get(); @@ -132,7 +134,7 @@ pub mod v8 { } pub struct VersionUncheckedMigrateV7ToV8(core::marker::PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateV7ToV8 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV7ToV8 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { Ok(Vec::new()) @@ -211,7 +213,7 @@ pub(crate) mod v7 { CountedStorageMap, Twox64Concat, PoolId, V7BondedPoolInner>; pub struct VersionUncheckedMigrateV6ToV7(core::marker::PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateV6ToV7 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV6ToV7 { fn on_runtime_upgrade() -> Weight { let migrated = BondedPools::::count(); // The TVL should be the sum of all the funds that are actively staked and in the @@ -282,7 +284,7 @@ mod v6 { }) } } - impl OnRuntimeUpgrade for MigrateToV6 { + impl UncheckedOnRuntimeUpgrade for MigrateToV6 { fn on_runtime_upgrade() -> Weight { let mut success = 0u64; let mut fail = 0u64; diff --git a/substrate/frame/nomination-pools/src/tests.rs b/substrate/frame/nomination-pools/src/tests.rs index 8fb2b41b88a1..32b3e9af3cd6 100644 --- a/substrate/frame/nomination-pools/src/tests.rs +++ b/substrate/frame/nomination-pools/src/tests.rs @@ -2441,16 +2441,10 @@ mod claim_payout { // given assert_eq!(Currency::free_balance(&10), 35); - // Permissioned by default - assert_noop!( - Pools::claim_payout_other(RuntimeOrigin::signed(80), 10), - Error::::DoesNotHavePermission - ); + // when - assert_ok!(Pools::set_claim_permission( - RuntimeOrigin::signed(10), - ClaimPermission::PermissionlessWithdraw - )); + // NOTE: Claim permission of `PermissionlessWithdraw` allows payout claiming as default, + // so a claim permission does not need to be set for non-pool members prior to claiming. 
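Several migrations in this diff (nomination-pools v6/v7/v8 above, society further down) switch their inner impls from `OnRuntimeUpgrade` to `UncheckedOnRuntimeUpgrade`: the version-unaware body gets its own trait, and only a version-checking wrapper exposes the public entry point. A self-contained model of that pattern, with frame-support's storage-version machinery reduced to a plain integer (trait shapes are simplified; the real wrapper is `frame_support::migrations::VersionedMigration`, which additionally takes pallet and weight-info types):

```rust
use core::marker::PhantomData;

trait UncheckedOnRuntimeUpgrade {
    /// Version-unaware migration body; returns consumed weight (simplified to u64).
    fn on_runtime_upgrade(version: &mut u16) -> u64;
}

trait OnRuntimeUpgrade {
    fn on_runtime_upgrade(version: &mut u16) -> u64;
}

/// Version-checking wrapper: the only way an `Inner` migration gets executed.
struct VersionedMigration<const FROM: u16, const TO: u16, Inner>(PhantomData<Inner>);

impl<const FROM: u16, const TO: u16, Inner: UncheckedOnRuntimeUpgrade> OnRuntimeUpgrade
    for VersionedMigration<FROM, TO, Inner>
{
    fn on_runtime_upgrade(version: &mut u16) -> u64 {
        if *version == FROM {
            let weight = Inner::on_runtime_upgrade(version);
            *version = TO; // bump the storage version exactly once
            weight + 1
        } else {
            0 // version mismatch: the body never runs
        }
    }
}

/// Stand-in for e.g. `VersionUncheckedMigrateV7ToV8`.
struct MigrateV7ToV8;
impl UncheckedOnRuntimeUpgrade for MigrateV7ToV8 {
    fn on_runtime_upgrade(_version: &mut u16) -> u64 {
        // ...translate storage from the v7 layout to the v8 layout...
        5
    }
}

fn main() {
    let mut version = 7;
    type V8 = VersionedMigration<7, 8, MigrateV7ToV8>;
    assert_eq!(<V8 as OnRuntimeUpgrade>::on_runtime_upgrade(&mut version), 6);
    assert_eq!(version, 8);
    // Re-running is a no-op: the unchecked body cannot fire twice by accident.
    assert_eq!(<V8 as OnRuntimeUpgrade>::on_runtime_upgrade(&mut version), 0);
}
```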
assert_ok!(Pools::claim_payout_other(RuntimeOrigin::signed(80), 10)); // then @@ -2489,7 +2483,6 @@ mod unbond { ); // Make permissionless - assert_eq!(ClaimPermissions::::get(10), ClaimPermission::Permissioned); assert_ok!(Pools::set_claim_permission( RuntimeOrigin::signed(20), ClaimPermission::PermissionlessAll @@ -4563,12 +4556,11 @@ mod withdraw_unbonded { CurrentEra::set(1); assert_eq!(PoolMembers::::get(20).unwrap().points, 20); - assert_ok!(Pools::set_claim_permission( - RuntimeOrigin::signed(20), - ClaimPermission::PermissionlessAll - )); assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 20)); - assert_eq!(ClaimPermissions::::get(20), ClaimPermission::PermissionlessAll); + assert_eq!( + ClaimPermissions::::get(20), + ClaimPermission::PermissionlessWithdraw + ); assert_eq!( pool_events_since_last_call(), @@ -4792,7 +4784,7 @@ mod create { } #[test] -fn set_claimable_actor_works() { +fn set_claim_permission_works() { ExtBuilder::default().build_and_execute(|| { // Given Currency::set_balance(&11, ExistentialDeposit::get() + 2); @@ -4811,22 +4803,19 @@ fn set_claimable_actor_works() { ] ); - // Make permissionless - assert_eq!(ClaimPermissions::::get(11), ClaimPermission::Permissioned); + // Make permissioned + assert_eq!(ClaimPermissions::::get(11), ClaimPermission::PermissionlessWithdraw); assert_noop!( - Pools::set_claim_permission( - RuntimeOrigin::signed(12), - ClaimPermission::PermissionlessAll - ), + Pools::set_claim_permission(RuntimeOrigin::signed(12), ClaimPermission::Permissioned), Error::::PoolMemberNotFound ); assert_ok!(Pools::set_claim_permission( RuntimeOrigin::signed(11), - ClaimPermission::PermissionlessAll + ClaimPermission::Permissioned )); // then - assert_eq!(ClaimPermissions::::get(11), ClaimPermission::PermissionlessAll); + assert_eq!(ClaimPermissions::::get(11), ClaimPermission::Permissioned); }); } @@ -5224,7 +5213,7 @@ mod bond_extra { assert_ok!(Pools::set_claim_permission( RuntimeOrigin::signed(10), - ClaimPermission::PermissionlessAll + ClaimPermission::PermissionlessCompound )); assert_ok!(Pools::bond_extra_other(RuntimeOrigin::signed(50), 10, BondExtra::Rewards)); assert_eq!(Currency::free_balance(&default_reward_account()), 7); diff --git a/substrate/frame/nomination-pools/test-staking/Cargo.toml b/substrate/frame/nomination-pools/test-staking/Cargo.toml index e53599d78d9a..413d5e794f8d 100644 --- a/substrate/frame/nomination-pools/test-staking/Cargo.toml +++ b/substrate/frame/nomination-pools/test-staking/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -scale-info = { version = "2.10.0", features = ["derive"] } +scale-info = { version = "2.11.1", features = ["derive"] } sp-runtime = { path = "../../../primitives/runtime" } sp-io = { path = "../../../primitives/io" } diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml index de8d022ecb13..4bf80736c549 100644 --- a/substrate/frame/offences/Cargo.toml +++ b/substrate/frame/offences/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } 
frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/offences/benchmarking/Cargo.toml b/substrate/frame/offences/benchmarking/Cargo.toml index 4dfef65d1a54..2f82e80ba80f 100644 --- a/substrate/frame/offences/benchmarking/Cargo.toml +++ b/substrate/frame/offences/benchmarking/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../benchmarking", default-features = false } frame-election-provider-support = { path = "../../election-provider-support", default-features = false } frame-support = { path = "../../support", default-features = false } diff --git a/substrate/frame/offences/src/tests.rs b/substrate/frame/offences/src/tests.rs index d525c7c3ab1d..4897b78f3e4d 100644 --- a/substrate/frame/offences/src/tests.rs +++ b/substrate/frame/offences/src/tests.rs @@ -204,7 +204,7 @@ fn reports_if_an_offence_is_dup() { assert_eq!(Offences::report_offence(vec![], test_offence.clone()), Ok(())); // creating a new offence for the same authorities on the next slot - // should be considered a new offence and thefore not known + // should be considered a new offence and therefore not known let test_offence_next_slot = offence(time_slot + 1, vec![0, 1]); assert!(!>::is_known_offence( &test_offence_next_slot.offenders, diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml index 31ed240605a7..23443ca15c7b 100644 --- a/substrate/frame/paged-list/Cargo.toml +++ b/substrate/frame/paged-list/Cargo.toml @@ -16,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -docify = "0.2.7" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +docify = "0.2.8" +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/paged-list/src/paged_list.rs b/substrate/frame/paged-list/src/paged_list.rs index bd2ff08bb0d1..60e065291ad0 100644 --- a/substrate/frame/paged-list/src/paged_list.rs +++ b/substrate/frame/paged-list/src/paged_list.rs @@ -189,7 +189,7 @@ impl Page { let values = sp_io::storage::get(&key) .and_then(|raw| alloc::vec::Vec::::decode(&mut &raw[..]).ok())?; if values.is_empty() { - // Dont create empty pages. + // Don't create empty pages. 
return None } let values = values.into_iter().skip(value_index as usize); diff --git a/substrate/frame/parameters/Cargo.toml b/substrate/frame/parameters/Cargo.toml index ef6c377b1cc7..9b8f735cee25 100644 --- a/substrate/frame/parameters/Cargo.toml +++ b/substrate/frame/parameters/Cargo.toml @@ -9,10 +9,10 @@ edition.workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["max-encoded-len"] } -scale-info = { version = "2.1.2", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } paste = { version = "1.0.14", default-features = false } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -docify = "0.2.5" +docify = "0.2.8" frame-support = { path = "../support", default-features = false, features = ["experimental"] } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/parameters/src/lib.rs b/substrate/frame/parameters/src/lib.rs index b6085a8e97f2..811ed5dc62de 100644 --- a/substrate/frame/parameters/src/lib.rs +++ b/substrate/frame/parameters/src/lib.rs @@ -124,7 +124,7 @@ use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use frame_support::traits::{ - dynamic_params::{AggregratedKeyValue, IntoKey, Key, RuntimeParameterStore, TryIntoKey}, + dynamic_params::{AggregatedKeyValue, IntoKey, Key, RuntimeParameterStore, TryIntoKey}, EnsureOriginWithArg, }; @@ -137,10 +137,10 @@ pub use pallet::*; pub use weights::WeightInfo; /// The key type of a parameter. -type KeyOf = <::RuntimeParameters as AggregratedKeyValue>::Key; +type KeyOf = <::RuntimeParameters as AggregatedKeyValue>::Key; /// The value type of a parameter. -type ValueOf = <::RuntimeParameters as AggregratedKeyValue>::Value; +type ValueOf = <::RuntimeParameters as AggregatedKeyValue>::Value; #[frame_support::pallet] pub mod pallet { @@ -156,7 +156,7 @@ pub mod pallet { /// /// Usually created by [`frame_support::dynamic_params`] or equivalent. #[pallet::no_default_bounds] - type RuntimeParameters: AggregratedKeyValue; + type RuntimeParameters: AggregatedKeyValue; /// The origin which may update a parameter. /// @@ -177,11 +177,11 @@ pub mod pallet { /// Is also emitted when the value was not changed. Updated { /// The key that was updated. - key: ::Key, + key: ::Key, /// The old value before this call. - old_value: Option<::Value>, + old_value: Option<::Value>, /// The new value after this call. 
- new_value: Option<::Value>, + new_value: Option<::Value>, }, } @@ -247,23 +247,23 @@ pub mod pallet { } impl RuntimeParameterStore for Pallet { - type AggregratedKeyValue = T::RuntimeParameters; + type AggregatedKeyValue = T::RuntimeParameters; fn get(key: K) -> Option where - KV: AggregratedKeyValue, - K: Key + Into<::Key>, - ::Key: IntoKey< - <::AggregratedKeyValue as AggregratedKeyValue>::Key, + KV: AggregatedKeyValue, + K: Key + Into<::Key>, + ::Key: IntoKey< + <::AggregatedKeyValue as AggregatedKeyValue>::Key, >, - <::AggregratedKeyValue as AggregratedKeyValue>::Value: - TryIntoKey<::Value>, - ::Value: TryInto, + <::AggregatedKeyValue as AggregatedKeyValue>::Value: + TryIntoKey<::Value>, + ::Value: TryInto, { - let key: ::Key = key.into(); + let key: ::Key = key.into(); let val = Parameters::::get(key.into_key()); val.and_then(|v| { - let val: ::Value = v.try_into_key().ok()?; + let val: ::Value = v.try_into_key().ok()?; let val: K::WrappedValue = val.try_into().ok()?; let val = val.into(); Some(val) diff --git a/substrate/frame/parameters/src/tests/unit.rs b/substrate/frame/parameters/src/tests/unit.rs index d3f11ba96403..d811a8356465 100644 --- a/substrate/frame/parameters/src/tests/unit.rs +++ b/substrate/frame/parameters/src/tests/unit.rs @@ -25,7 +25,7 @@ use crate::tests::mock::{ RuntimeParametersValue, }; use codec::Encode; -use frame_support::{assert_noop, assert_ok, traits::dynamic_params::AggregratedKeyValue}; +use frame_support::{assert_noop, assert_ok, traits::dynamic_params::AggregatedKeyValue}; use sp_core::Get; use sp_runtime::DispatchError; diff --git a/substrate/frame/preimage/Cargo.toml b/substrate/frame/preimage/Cargo.toml index 816d6736cd36..de50f4c0a0de 100644 --- a/substrate/frame/preimage/Cargo.toml +++ b/substrate/frame/preimage/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/proxy/Cargo.toml b/substrate/frame/proxy/Cargo.toml index 58f30de82b17..b905bfbb368b 100644 --- a/substrate/frame/proxy/Cargo.toml +++ b/substrate/frame/proxy/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/proxy/src/lib.rs b/substrate/frame/proxy/src/lib.rs index 3f6d1503ac57..673a5dad2280 100644 --- a/substrate/frame/proxy/src/lib.rs +++ b/substrate/frame/proxy/src/lib.rs @@ -122,7 +122,7 @@ pub mod pallet { /// The currency mechanism. type Currency: ReservableCurrency; - /// A kind of proxy; specified with the proxy and passed in to the `IsProxyable` fitler. 
+ /// A kind of proxy; specified with the proxy and passed in to the `IsProxyable` filter. /// The instance filter determines whether a given call may be proxied under this type. /// /// IMPORTANT: `Default` must be provided and MUST BE the *most permissive* value. diff --git a/substrate/frame/ranked-collective/Cargo.toml b/substrate/frame/ranked-collective/Cargo.toml index c137148c33cd..d90a631251fe 100644 --- a/substrate/frame/ranked-collective/Cargo.toml +++ b/substrate/frame/ranked-collective/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/recovery/Cargo.toml b/substrate/frame/recovery/Cargo.toml index e6f3fe538415..88009b30d630 100644 --- a/substrate/frame/recovery/Cargo.toml +++ b/substrate/frame/recovery/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml index 930340a55d15..d1160dc7e42c 100644 --- a/substrate/frame/referenda/Cargo.toml +++ b/substrate/frame/referenda/Cargo.toml @@ -20,7 +20,7 @@ assert_matches = { version = "1.5", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/referenda/src/lib.rs b/substrate/frame/referenda/src/lib.rs index d7b550df8d66..789cae465f24 100644 --- a/substrate/frame/referenda/src/lib.rs +++ b/substrate/frame/referenda/src/lib.rs @@ -426,6 +426,8 @@ pub mod pallet { BadStatus, /// The preimage does not exist. PreimageNotExist, + /// The preimage is stored with a different length than the one provided. + PreimageStoredWithDifferentLength, } #[pallet::hooks] @@ -464,6 +466,16 @@ let proposal_origin = *proposal_origin; let who = T::SubmitOrigin::ensure_origin(origin, &proposal_origin)?; + // If the pre-image is already stored, ensure that it has the same length as given in + // `proposal`.
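The `if let` completing this guard follows just below: `submit` now rejects a proposal whose claimed length disagrees with the length of the already-noted preimage. A standalone sketch of the same check (the preimage store is stubbed and all names here are illustrative):

```rust
// Stand-in for `Bounded::Lookup { hash, len }` from the new test.
struct Lookup {
    hash: [u8; 32],
    len: u32,
}

// Stand-in for `T::Preimages::len(&hash)`: the length the preimage pallet has
// actually noted for this hash, if any.
fn stored_len(_hash: &[u8; 32]) -> Option<u32> {
    Some(2)
}

fn ensure_len_matches(proposal: &Lookup) -> Result<(), &'static str> {
    if let Some(preimage_len) = stored_len(&proposal.hash) {
        if preimage_len != proposal.len {
            // Mirrors `Error::<T>::PreimageStoredWithDifferentLength`.
            return Err("PreimageStoredWithDifferentLength");
        }
    }
    Ok(())
}

fn main() {
    assert!(ensure_len_matches(&Lookup { hash: [0; 32], len: 2 }).is_ok());
    // The new `detects_incorrect_len` test submits a mismatched `len` against
    // the noted preimage and expects exactly this failure.
    assert!(ensure_len_matches(&Lookup { hash: [0; 32], len: 3 }).is_err());
}
```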
+ if let (Some(preimage_len), Some(proposal_len)) = + (proposal.lookup_hash().and_then(|h| T::Preimages::len(&h)), proposal.lookup_len()) + { + if preimage_len != proposal_len { + return Err(Error::::PreimageStoredWithDifferentLength.into()) + } + } + let track = T::Tracks::track_for(&proposal_origin).map_err(|_| Error::::NoTrack)?; let submission_deposit = Self::take_deposit(who, T::SubmissionDeposit::get())?; diff --git a/substrate/frame/referenda/src/tests.rs b/substrate/frame/referenda/src/tests.rs index 8f51136de0bf..52251fcbdbee 100644 --- a/substrate/frame/referenda/src/tests.rs +++ b/substrate/frame/referenda/src/tests.rs @@ -666,3 +666,19 @@ fn clear_metadata_works() { })); }); } + +#[test] +fn detects_incorrect_len() { + ExtBuilder::default().build_and_execute(|| { + let hash = note_preimage(1); + assert_noop!( + Referenda::submit( + RuntimeOrigin::signed(1), + Box::new(RawOrigin::Root.into()), + frame_support::traits::Bounded::Lookup { hash, len: 3 }, + DispatchTime::At(1), + ), + Error::::PreimageStoredWithDifferentLength + ); + }); +} diff --git a/substrate/frame/remark/Cargo.toml b/substrate/frame/remark/Cargo.toml index 06aa1d32179a..7c41f34715b7 100644 --- a/substrate/frame/remark/Cargo.toml +++ b/substrate/frame/remark/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml index 98958d209c4f..d90878e21893 100644 --- a/substrate/frame/root-offences/Cargo.toml +++ b/substrate/frame/root-offences/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } pallet-session = { path = "../session", default-features = false, features = ["historical"] } pallet-staking = { path = "../staking", default-features = false } diff --git a/substrate/frame/root-testing/Cargo.toml b/substrate/frame/root-testing/Cargo.toml index e10e8d62ea28..0909b00d33e5 100644 --- a/substrate/frame/root-testing/Cargo.toml +++ b/substrate/frame/root-testing/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } diff --git a/substrate/frame/root-testing/src/lib.rs b/substrate/frame/root-testing/src/lib.rs index 177ce8957c49..23864b68239c 100644 --- a/substrate/frame/root-testing/src/lib.rs +++ 
b/substrate/frame/root-testing/src/lib.rs @@ -17,7 +17,7 @@ //! # Root Testing Pallet //! -//! Pallet that contains extrinsics that can be usefull in testing. +//! Pallet that contains extrinsics that can be useful in testing. //! //! NOTE: This pallet should only be used for testing purposes and should not be used in production //! runtimes! diff --git a/substrate/frame/safe-mode/Cargo.toml b/substrate/frame/safe-mode/Cargo.toml index 46ff391cbc12..03dfaf485d0e 100644 --- a/substrate/frame/safe-mode/Cargo.toml +++ b/substrate/frame/safe-mode/Cargo.toml @@ -16,11 +16,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -docify = "0.2.7" +docify = "0.2.8" frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } pallet-balances = { path = "../balances", default-features = false, optional = true } diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml index fb77ac11be9a..c559151ad183 100644 --- a/substrate/frame/salary/Cargo.toml +++ b/substrate/frame/salary/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/salary/src/tests/integration.rs b/substrate/frame/salary/src/tests/integration.rs index 26afb2d8abaf..124ab38c5651 100644 --- a/substrate/frame/salary/src/tests/integration.rs +++ b/substrate/frame/salary/src/tests/integration.rs @@ -250,7 +250,7 @@ fn swap_exhaustive_works() { }); assert_eq!(root_add, root_swap); - // Ensure that we dont compare trivial stuff like `()` from a type error above. + // Ensure that we don't compare trivial stuff like `()` from a type error above. 
assert_eq!(root_add.len(), 32); }); } diff --git a/substrate/frame/salary/src/tests/unit.rs b/substrate/frame/salary/src/tests/unit.rs index 5e3083541160..db1c8b947ef5 100644 --- a/substrate/frame/salary/src/tests/unit.rs +++ b/substrate/frame/salary/src/tests/unit.rs @@ -508,7 +508,7 @@ fn zero_payment_fails() { } #[test] -fn unregistered_bankrupcy_fails_gracefully() { +fn unregistered_bankruptcy_fails_gracefully() { new_test_ext().execute_with(|| { assert_ok!(Salary::init(RuntimeOrigin::signed(1))); set_rank(1, 2); @@ -532,7 +532,7 @@ fn unregistered_bankrupcy_fails_gracefully() { } #[test] -fn registered_bankrupcy_fails_gracefully() { +fn registered_bankruptcy_fails_gracefully() { new_test_ext().execute_with(|| { assert_ok!(Salary::init(RuntimeOrigin::signed(1))); set_rank(1, 2); @@ -561,7 +561,7 @@ fn registered_bankrupcy_fails_gracefully() { } #[test] -fn mixed_bankrupcy_fails_gracefully() { +fn mixed_bankruptcy_fails_gracefully() { new_test_ext().execute_with(|| { assert_ok!(Salary::init(RuntimeOrigin::signed(1))); set_rank(1, 2); @@ -589,7 +589,7 @@ fn mixed_bankrupcy_fails_gracefully() { } #[test] -fn other_mixed_bankrupcy_fails_gracefully() { +fn other_mixed_bankruptcy_fails_gracefully() { new_test_ext().execute_with(|| { assert_ok!(Salary::init(RuntimeOrigin::signed(1))); set_rank(1, 2); diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index b2172a2cfa22..72699f49765c 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] scale-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/sassafras/src/benchmarking.rs b/substrate/frame/sassafras/src/benchmarking.rs index 921f2f0793d3..2b2467c6f84d 100644 --- a/substrate/frame/sassafras/src/benchmarking.rs +++ b/substrate/frame/sassafras/src/benchmarking.rs @@ -84,7 +84,7 @@ mod benchmarks { // - load the full ring context. // - recompute the ring verifier. // - sorting the epoch tickets in one shot - // (here we account for the very unluky scenario where we haven't done any sort work yet) + // (here we account for the very unlucky scenario where we haven't done any sort work yet) // - pending epoch change config. // // For this bench we assume a redundancy factor of 2 (suggested value to be used in prod). 
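The next two hunks replace tuple-struct construction (`EphemeralPublic([i as u8; 32])`) with `EphemeralPublic::from(...)`. A sketch of why the `From` form is the more robust spelling when a newtype's constructor is not guaranteed to stay public (the private field below is an assumption for illustration, not taken from this diff):

```rust
mod keys {
    // If this inner field is (or later becomes) private, direct construction
    // with `EphemeralPublic([0u8; 32])` stops compiling outside this module,
    // while a `From<[u8; 32]>` impl keeps working unchanged.
    pub struct EphemeralPublic([u8; 32]);

    impl From<[u8; 32]> for EphemeralPublic {
        fn from(raw: [u8; 32]) -> Self {
            Self(raw)
        }
    }
}

fn main() {
    use keys::EphemeralPublic;
    // let k = EphemeralPublic([7u8; 32]); // error[E0603]: constructor is private
    let _k = EphemeralPublic::from([7u8; 32]); // fine
}
```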
@@ -139,8 +139,8 @@ mod benchmarks { TicketsIds::<T>::insert((epoch_tag as u8, i), id); let body = TicketBody { attempt_idx: i, - erased_public: EphemeralPublic([i as u8; 32]), - revealed_public: EphemeralPublic([i as u8; 32]), + erased_public: EphemeralPublic::from([i as u8; 32]), + revealed_public: EphemeralPublic::from([i as u8; 32]), }; TicketsData::<T>::set(id, Some(body)); }); @@ -236,8 +236,8 @@ mod benchmarks { .map(|i| { let body = TicketBody { attempt_idx: i, - erased_public: EphemeralPublic([i as u8; 32]), - revealed_public: EphemeralPublic([i as u8; 32]), + erased_public: EphemeralPublic::from([i as u8; 32]), + revealed_public: EphemeralPublic::from([i as u8; 32]), }; let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes()); let id = TicketId::from_le_bytes(id_bytes); diff --git a/substrate/frame/sassafras/src/lib.rs b/substrate/frame/sassafras/src/lib.rs index ba71dd2cb9ab..720311dc984c 100644 --- a/substrate/frame/sassafras/src/lib.rs +++ b/substrate/frame/sassafras/src/lib.rs @@ -236,7 +236,7 @@ pub mod pallet { /// Epoch X first N-th ticket has key (X mod 2, N) /// /// Note that the ticket's index doesn't directly correspond to the slot index within the epoch. - /// The assigment is computed dynamically using an *outside-in* strategy. + /// The assignment is computed dynamically using an *outside-in* strategy. /// /// Be aware that entries within this map are never removed, only overwritten. /// Last element index should be fetched from the [`TicketsMeta`] value. @@ -467,7 +467,7 @@ pub mod pallet { /// Plan an epoch configuration change. /// - /// The epoch configuration change is recorded and will be announced at the begining + /// The epoch configuration change is recorded and will be announced at the beginning /// of the next epoch together with next epoch authorities information. /// In other words, the configuration will be enacted one epoch later. /// @@ -760,11 +760,11 @@ impl<T: Config> Pallet<T> { let randomness = hashing::blake2_256(buf.as_slice()); RandomnessAccumulator::<T>::put(randomness); - let next_randoness = Self::update_epoch_randomness(1); + let next_randomness = Self::update_epoch_randomness(1); // Deposit a log as this is the first block in first epoch.
let next_epoch = NextEpochDescriptor { - randomness: next_randoness, + randomness: next_randomness, authorities: Self::next_authorities().into_inner(), config: None, }; diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index 02e6b23553f5..323f52f4eb96 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -15,14 +15,14 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-weights = { path = "../../primitives/weights", default-features = false } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] pallet-preimage = { path = "../preimage" } diff --git a/substrate/frame/scheduler/README.md b/substrate/frame/scheduler/README.md index 6aec2ddb0e43..5e233fdbdb0e 100644 --- a/substrate/frame/scheduler/README.md +++ b/substrate/frame/scheduler/README.md @@ -16,7 +16,7 @@ for the origin: namely `frame_system::Config::BaseCallFilter` for all origin except root which will get no filter. And not the filter contained in origin use to call `fn schedule`. -If a call is scheduled using proxy or whatever mecanism which adds filter, +If a call is scheduled using proxy or whatever mechanism which adds filter, then those filter will not be used when dispatching the schedule call. ## Interface diff --git a/substrate/frame/scheduler/src/benchmarking.rs b/substrate/frame/scheduler/src/benchmarking.rs index 613c4557434a..d35cd28f2666 100644 --- a/substrate/frame/scheduler/src/benchmarking.rs +++ b/substrate/frame/scheduler/src/benchmarking.rs @@ -212,7 +212,7 @@ benchmarks! { } verify { } - // `execute_dispatch` when the origin is `Signed`, not counting the dispatable's weight. + // `execute_dispatch` when the origin is `Signed`, not counting the dispatchable's weight. execute_dispatch_signed { let mut counter = WeightMeter::new(); let origin = make_origin::(true); @@ -223,7 +223,7 @@ benchmarks! { verify { } - // `execute_dispatch` when the origin is not `Signed`, not counting the dispatable's weight. + // `execute_dispatch` when the origin is not `Signed`, not counting the dispatchable's weight. execute_dispatch_unsigned { let mut counter = WeightMeter::new(); let origin = make_origin::(false); diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index 26f2fa4f499c..e0245f497500 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -46,7 +46,7 @@ //! 1. Scheduling a runtime call at a specific block. #![doc = docify::embed!("src/tests.rs", basic_scheduling_works)] //! -//! 2. Scheduling a preimage hash of a runtime call at a specifc block +//! 2. Scheduling a preimage hash of a runtime call at a specific block #![doc = docify::embed!("src/tests.rs", scheduling_with_preimages_works)] //! 
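The scheduler hunk that follows changes the failure path of `service_task`: when `peek` on a scheduled call's preimage fails, the preimage request is now dropped and the full task weight is accounted, and the tests are updated so the hash is no longer left permanently requested. A condensed, self-contained model of that path (preimage store and weight meter are stubbed; names are mine):

```rust
#[derive(Debug, PartialEq)]
enum ServiceError {
    Unavailable,
}

struct WeightMeter {
    consumed: u64,
}

struct Preimages {
    requested: bool,
}

impl Preimages {
    // The preimage was never provided, so `peek` always fails here.
    fn peek(&self) -> Option<Vec<u8>> {
        None
    }
    // Stand-in for `T::Preimages::drop(&task.call)`.
    fn drop_request(&mut self) {
        self.requested = false;
    }
}

fn service_task(store: &mut Preimages, meter: &mut WeightMeter) -> Result<(), ServiceError> {
    match store.peek() {
        Some(_call) => Ok(()), // dispatch would happen here
        None => {
            // It was not available when we needed it, so stop requesting it;
            // previously the request leaked and stayed set forever.
            store.drop_request();
            // We don't know why `peek` failed, so account for the full weight.
            meter.consumed += 10;
            Err(ServiceError::Unavailable)
        },
    }
}

fn main() {
    let mut store = Preimages { requested: true };
    let mut meter = WeightMeter { consumed: 0 };
    assert_eq!(service_task(&mut store, &mut meter), Err(ServiceError::Unavailable));
    // Mirrors the updated tests: the hash is no longer requested afterwards.
    assert!(!store.requested);
    assert_eq!(meter.consumed, 10);
}
```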
@@ -1271,6 +1271,17 @@ impl Pallet { id: task.maybe_id, }); + // It was not available when we needed it, so we don't need to have requested it + // anymore. + T::Preimages::drop(&task.call); + + // We don't know why `peek` failed, thus we most account here for the "full weight". + let _ = weight.try_consume(T::WeightInfo::service_task( + task.call.lookup_len().map(|x| x as usize), + task.maybe_id.is_some(), + task.maybe_periodic.is_some(), + )); + return Err((Unavailable, Some(task))) }, }; @@ -1439,6 +1450,7 @@ impl Pallet { } } +#[allow(deprecated)] impl schedule::v2::Anon, ::RuntimeCall, T::PalletsOrigin> for Pallet { @@ -1473,6 +1485,8 @@ impl schedule::v2::Anon, ::RuntimeCall } } +// TODO: migrate `schedule::v2::Anon` to `v3` +#[allow(deprecated)] impl schedule::v2::Named, ::RuntimeCall, T::PalletsOrigin> for Pallet { diff --git a/substrate/frame/scheduler/src/tests.rs b/substrate/frame/scheduler/src/tests.rs index 1ed2ca9e2f36..3023a370a4b6 100644 --- a/substrate/frame/scheduler/src/tests.rs +++ b/substrate/frame/scheduler/src/tests.rs @@ -1119,7 +1119,7 @@ fn reschedule_named_works() { } #[test] -fn reschedule_named_perodic_works() { +fn reschedule_named_periodic_works() { new_test_ext().execute_with(|| { let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_parts(10, 0) }); @@ -1501,8 +1501,11 @@ fn scheduler_handles_periodic_unavailable_preimage() { run_to_block(4); assert_eq!(logger::log().len(), 1); - // Unnote the preimage + // As the public api doesn't support to remove a noted preimage, we need to first unnote it + // and then request it again. Basically this should not happen in real life (whatever you + // call real life;). Preimage::unnote(&hash); + Preimage::request(&hash); // Does not ever execute again. run_to_block(100); @@ -2285,9 +2288,18 @@ fn postponed_named_task_cannot_be_rescheduled() { // Run to a very large block. run_to_block(10); + // It was not executed. assert!(logger::log().is_empty()); - assert!(Preimage::is_requested(&hash)); + + // Preimage was not available + assert_eq!( + System::events().last().unwrap().event, + crate::Event::CallUnavailable { task: (4, 0), id: Some(name) }.into() + ); + + // So it should not be requested. + assert!(!Preimage::is_requested(&hash)); // Postponing removes the lookup. assert!(!Lookup::::contains_key(name)); @@ -2307,11 +2319,12 @@ fn postponed_named_task_cannot_be_rescheduled() { ); // Finally add the preimage. - assert_ok!(Preimage::note(call.encode().into())); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode())); + run_to_block(1000); // It did not execute. assert!(logger::log().is_empty()); - assert!(Preimage::is_requested(&hash)); + assert!(!Preimage::is_requested(&hash)); // Manually re-schedule the call by name does not work. assert_err!( @@ -2980,7 +2993,7 @@ fn reschedule_named_last_task_removes_agenda() { }); } -/// Ensures that an unvailable call sends an event. +/// Ensures that an unavailable call sends an event. #[test] fn unavailable_call_is_detected() { use frame_support::traits::schedule::v3::Named; @@ -3008,6 +3021,8 @@ fn unavailable_call_is_detected() { // Ensure the preimage isn't available assert!(!Preimage::have(&bound)); + // But we have requested it + assert!(Preimage::is_requested(&hash)); // Executes in block 4. run_to_block(4); @@ -3016,5 +3031,7 @@ fn unavailable_call_is_detected() { System::events().last().unwrap().event, crate::Event::CallUnavailable { task: (4, 0), id: Some(name) }.into() ); + // It should not be requested anymore. 
+ assert!(!Preimage::is_requested(&hash)); }); } diff --git a/substrate/frame/scored-pool/Cargo.toml b/substrate/frame/scored-pool/Cargo.toml index 0ef19961ad92..96aa178553a3 100644 --- a/substrate/frame/scored-pool/Cargo.toml +++ b/substrate/frame/scored-pool/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } diff --git a/substrate/frame/session/Cargo.toml b/substrate/frame/session/Cargo.toml index 4ecb3b14cc21..e0e67f395db0 100644 --- a/substrate/frame/session/Cargo.toml +++ b/substrate/frame/session/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } pallet-timestamp = { path = "../timestamp", default-features = false } diff --git a/substrate/frame/session/benchmarking/Cargo.toml b/substrate/frame/session/benchmarking/Cargo.toml index c193d0863ac1..938e56b2edc0 100644 --- a/substrate/frame/session/benchmarking/Cargo.toml +++ b/substrate/frame/session/benchmarking/Cargo.toml @@ -28,7 +28,7 @@ sp-session = { path = "../../../primitives/session", default-features = false } [dev-dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -scale-info = "2.10.0" +scale-info = "2.11.1" frame-election-provider-support = { path = "../../election-provider-support" } pallet-balances = { path = "../../balances" } pallet-staking-reward-curve = { path = "../../staking/reward-curve" } diff --git a/substrate/frame/society/Cargo.toml b/substrate/frame/society/Cargo.toml index b6f1323ca4f5..4afc55fdaf16 100644 --- a/substrate/frame/society/Cargo.toml +++ b/substrate/frame/society/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true } rand_chacha = { version = "0.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } sp-io = { path = "../../primitives/io", default-features = false } diff --git a/substrate/frame/society/src/lib.rs b/substrate/frame/society/src/lib.rs index 09bda5a541de..219db492e480 100644 --- a/substrate/frame/society/src/lib.rs +++ b/substrate/frame/society/src/lib.rs @@ -1511,7 +1511,7 @@ impl, I: 'static> Pallet { // the Founder. 
if MemberCount::::get() > 2 { let defender = next_defender - .or_else(|| Self::pick_defendent(rng)) + .or_else(|| Self::pick_defendant(rng)) .expect("exited if members empty; qed"); let skeptic = Self::pick_member_except(rng, &defender).expect("exited if members empty; qed"); @@ -1873,7 +1873,7 @@ impl, I: 'static> Pallet { /// /// If only the Founder and Head members exist (or the state is inconsistent), then `None` /// may be returned. - fn pick_defendent(rng: &mut impl RngCore) -> Option { + fn pick_defendant(rng: &mut impl RngCore) -> Option { let member_count = MemberCount::::get(); if member_count <= 2 { return None @@ -1978,7 +1978,7 @@ impl, I: 'static> Pallet { /// Transfer some `amount` from the main account into the payouts account and reduce the Pot /// by this amount. fn reserve_payout(amount: BalanceOf) { - // Tramsfer payout from the Pot into the payouts account. + // Transfer payout from the Pot into the payouts account. Pot::::mutate(|pot| pot.saturating_reduce(amount)); // this should never fail since we ensure we can afford the payouts in a previous @@ -1990,7 +1990,7 @@ impl, I: 'static> Pallet { /// Transfer some `amount` from the main account into the payouts account and increase the Pot /// by this amount. fn unreserve_payout(amount: BalanceOf) { - // Tramsfer payout from the Pot into the payouts account. + // Transfer payout from the Pot into the payouts account. Pot::::mutate(|pot| pot.saturating_accrue(amount)); // this should never fail since we ensure we can afford the payouts in a previous diff --git a/substrate/frame/society/src/migrations.rs b/substrate/frame/society/src/migrations.rs index cbc8fd11c081..396ed787c784 100644 --- a/substrate/frame/society/src/migrations.rs +++ b/substrate/frame/society/src/migrations.rs @@ -20,7 +20,7 @@ use super::*; use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; -use frame_support::traits::{Defensive, DefensiveOption, Instance, OnRuntimeUpgrade}; +use frame_support::traits::{Defensive, DefensiveOption, Instance, UncheckedOnRuntimeUpgrade}; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; @@ -37,7 +37,7 @@ impl< T: Config, I: Instance + 'static, PastPayouts: Get::AccountId, BalanceOf)>>, - > OnRuntimeUpgrade for VersionUncheckedMigrateToV2 + > UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV2 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { @@ -241,7 +241,7 @@ pub fn assert_internal_consistency, I: Instance + 'static>() { pub fn from_original, I: Instance + 'static>( past_payouts: &mut [(::AccountId, BalanceOf)], ) -> Result { - // Migrate Bids from old::Bids (just a trunctation). + // Migrate Bids from old::Bids (just a truncation). Bids::::put(BoundedVec::<_, T::MaxBids>::truncate_from(v0::Bids::::take())); // Initialise round counter. @@ -288,13 +288,13 @@ pub fn from_original, I: Instance + 'static>( .defensive_ok_or("member_count > 0, we must have at least 1 member")?; // Swap the founder with the first member in MemberByIndex. MemberByIndex::::swap(0, member_count); - // Update the indicies of the swapped member MemberRecords. + // Update the indices of the swapped member MemberRecords. 
Members::::mutate(&member, |m| { if let Some(member) = m { member.index = 0; } else { frame_support::defensive!( - "Member somehow disapeared from storage after it was inserted" + "Member somehow disappeared from storage after it was inserted" ); } }); @@ -303,7 +303,7 @@ pub fn from_original, I: Instance + 'static>( member.index = member_count; } else { frame_support::defensive!( - "Member somehow disapeared from storage after it was queried" + "Member somehow disappeared from storage after it was queried" ); } }); diff --git a/substrate/frame/society/src/tests.rs b/substrate/frame/society/src/tests.rs index 411567e1dedf..5f8ecc9a7c52 100644 --- a/substrate/frame/society/src/tests.rs +++ b/substrate/frame/society/src/tests.rs @@ -541,7 +541,7 @@ fn suspended_candidate_rejected_works() { assert_ok!(Society::vote(Origin::signed(30), x, true)); } - // Voting continues, as no canidate is clearly accepted yet and the founder chooses not to + // Voting continues, as no candidate is clearly accepted yet and the founder chooses not to // act. conclude_intake(false, None); assert_eq!(members(), vec![10, 20, 30]); diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index c8ef8df4e73b..add497d05bea 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -45,16 +45,20 @@ //! //! In short, this crate only re-exports types and traits from multiple sources. All of these //! sources are listed (and re-exported again) in [`deps`]. +//! +//! ## Usage +//! +//! Please note that this crate can only be imported as `polkadot-sdk-frame` or `frame`. #![cfg_attr(not(feature = "std"), no_std)] #![cfg(feature = "experimental")] /// Exports the main pallet macro. This can wrap a `mod pallet` and will transform it into -/// being a pallet, eg `#[frame::pallet] mod pallet { .. }`. +/// being a pallet, eg `#[polkadot_sdk_frame::pallet] mod pallet { .. }`. /// /// Note that this is not part of the prelude, in order to make it such that the common way to -/// define a macro is `#[frame::pallet] mod pallet { .. }`, followed by `#[pallet::foo]`, -/// `#[pallet::bar]` inside the mod. +/// define a macro is `#[polkadot_sdk_frame::pallet] mod pallet { .. }`, followed by +/// `#[pallet::foo]`, `#[pallet::bar]` inside the mod. pub use frame_support::pallet; pub use frame_support::pallet_macros::{import_section, pallet_section}; @@ -75,7 +79,7 @@ pub mod pallet_macros { /// This prelude should almost always be the first line of code in any pallet or runtime. /// /// ``` -/// use frame::prelude::*; +/// use polkadot_sdk_frame::prelude::*; /// /// // rest of your pallet.. /// mod pallet {} @@ -84,7 +88,7 @@ pub mod prelude { /// `frame_system`'s parent crate, which is mandatory in all pallets build with this crate. /// /// Conveniently, the keyword `frame_system` is in scope as one uses `use - /// frame::prelude::*` + /// polkadot_sdk_frame::prelude::*` #[doc(inline)] pub use frame_system; @@ -108,7 +112,7 @@ pub mod prelude { /// A test setup typically starts with: /// /// ``` -/// use frame::testing_prelude::*; +/// use polkadot_sdk_frame::testing_prelude::*; /// // rest of your test setup. /// ``` #[cfg(feature = "std")] @@ -136,7 +140,7 @@ pub mod runtime { /// A runtime typically starts with: /// /// ``` - /// use frame::{prelude::*, runtime::prelude::*}; + /// use polkadot_sdk_frame::{prelude::*, runtime::prelude::*}; /// ``` pub mod prelude { /// All of the types related to the FRAME runtime executive. 
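Given the note above that the crate can only be imported as `polkadot-sdk-frame` or `frame`, here is a skeleton of the renamed usage in a downstream pallet. This is a sketch assuming a `polkadot-sdk-frame` dependency with its `experimental` feature enabled, not a compilable example in isolation:

```rust
// Minimal no-op pallet built against the umbrella crate, using the import
// paths the docs above now recommend.
#[polkadot_sdk_frame::pallet]
pub mod pallet {
    use polkadot_sdk_frame::prelude::*;

    #[pallet::config]
    pub trait Config: frame_system::Config {}

    #[pallet::pallet]
    pub struct Pallet<T>(_);
}
```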
@@ -181,7 +185,7 @@ pub mod runtime { /// A non-testing runtime should have this enabled, as such: /// /// ``` - /// use frame::runtime::{prelude::*, apis::{*,}}; + /// use polkadot_sdk_frame::runtime::{prelude::*, apis::{*,}}; /// ``` // TODO: This is because of wildcard imports, and it should be not needed once we can avoid // that. Imports like that are needed because we seem to need some unknown types in the macro @@ -325,8 +329,8 @@ pub mod derive { /// In most cases, hopefully the answer is yes. pub mod deps { // TODO: It would be great to somehow instruct RA to prefer *not* suggesting auto-imports from - // these. For example, we prefer `frame::derive::CloneNoBound` rather than - // `frame::deps::frame_support::CloneNoBound`. + // these. For example, we prefer `polkadot_sdk_frame::derive::CloneNoBound` rather than + // `polkadot_sdk_frame::deps::frame_support::CloneNoBound`. pub use frame_support; pub use frame_system; diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index 2149148018e6..42ef96499f57 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -20,7 +20,7 @@ serde = { features = ["alloc", "derive"], workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } @@ -39,10 +39,10 @@ frame-benchmarking = { path = "../benchmarking", default-features = false, optio rand_chacha = { version = "0.2", default-features = false, optional = true } [dev-dependencies] +pallet-balances = { path = "../balances" } sp-tracing = { path = "../../primitives/tracing" } sp-core = { path = "../../primitives/core" } sp-npos-elections = { path = "../../primitives/npos-elections" } -pallet-balances = { path = "../balances" } pallet-timestamp = { path = "../timestamp" } pallet-staking-reward-curve = { path = "reward-curve" } pallet-bags-list = { path = "../bags-list" } diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index cea2eb4ab0de..e9a6f3f64a71 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -952,6 +952,15 @@ benchmarks! { assert_eq!(MinCommission::::get(), Perbill::from_percent(100)); } + restore_ledger { + let (stash, controller) = create_stash_controller::(0, 100, RewardDestination::Staked)?; + // corrupt ledger. + Ledger::::remove(controller); + }: _(RawOrigin::Root, stash.clone(), None, None, None) + verify { + assert_eq!(Staking::::inspect_bond_state(&stash), Ok(LedgerIntegrityState::Ok)); + } + impl_benchmark_test_suite!( Staking, crate::mock::ExtBuilder::default().has_stakers(true), diff --git a/substrate/frame/staking/src/ledger.rs b/substrate/frame/staking/src/ledger.rs index 890751521c1b..71843ce7fef7 100644 --- a/substrate/frame/staking/src/ledger.rs +++ b/substrate/frame/staking/src/ledger.rs @@ -129,7 +129,7 @@ impl StakingLedger { // if ledger bond is in a bad state, return error to prevent applying operations that may // further spoil the ledger's state. 
A bond is in bad state when the bonded controller is - // associted with a different ledger (i.e. a ledger with a different stash). + // associated with a different ledger (i.e. a ledger with a different stash). // // See for more details. ensure!( diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index fc58e4c520df..45d8a5b02f47 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -495,6 +495,20 @@ pub struct StakingLedger { controller: Option, } +/// State of a ledger with regards with its data and metadata integrity. +#[derive(PartialEq, Debug)] +enum LedgerIntegrityState { + /// Ledger, bond and corresponding staking lock is OK. + Ok, + /// Ledger and/or bond is corrupted. This means that the bond has a ledger with a different + /// stash than the bonded stash. + Corrupted, + /// Ledger was corrupted and it has been killed. + CorruptedKilled, + /// Ledger and bond are OK, however the ledger's stash lock is out of sync. + LockCorrupted, +} + impl StakingLedger { /// Remove entries from `unlocking` that are sufficiently old and reduce the /// total by the sum of their balances. diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 2c3dd47848a9..33aec5dc123c 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -30,7 +30,7 @@ use frame_support::ensure; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; -/// Used for release versioning upto v12. +/// Used for release versioning up to v12. /// /// Obsolete from v13. Keeping around to make encoding/decoding of old migration code easier. #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 6c2ea225ff1e..6db462c1a70f 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -25,8 +25,8 @@ use frame_election_provider_support::{ use frame_support::{ assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ - ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, OnUnbalanced, - OneSessionHandler, + ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, LockableCurrency, + OnUnbalanced, OneSessionHandler, WithdrawReasons, }, weights::constants::RocksDbWeight, }; @@ -786,55 +786,86 @@ pub(crate) fn bond_controller_stash(controller: AccountId, stash: AccountId) -> Ok(()) } +// simulates `set_controller` without corrupted ledger checks for testing purposes. +pub(crate) fn set_controller_no_checks(stash: &AccountId) { + let controller = Bonded::::get(stash).expect("testing stash should be bonded"); + let ledger = Ledger::::get(&controller).expect("testing ledger should exist"); + + Ledger::::remove(&controller); + Ledger::::insert(stash, ledger); + Bonded::::insert(stash, stash); +} + +// simulates `bond_extra` without corrupted ledger checks for testing purposes. 
+pub(crate) fn bond_extra_no_checks(stash: &AccountId, amount: Balance) { + let controller = Bonded::::get(stash).expect("bond must exist to bond_extra"); + let mut ledger = Ledger::::get(&controller).expect("ledger must exist to bond_extra"); + + let new_total = ledger.total + amount; + Balances::set_lock(crate::STAKING_ID, stash, new_total, WithdrawReasons::all()); + ledger.total = new_total; + ledger.active = new_total; + Ledger::::insert(controller, ledger); +} + pub(crate) fn setup_double_bonded_ledgers() { - assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 10, RewardDestination::Staked)); - assert_ok!(Staking::bond(RuntimeOrigin::signed(2), 20, RewardDestination::Staked)); - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 20, RewardDestination::Staked)); + let init_ledgers = Ledger::::iter().count(); + + let _ = Balances::make_free_balance_be(&333, 2000); + let _ = Balances::make_free_balance_be(&444, 2000); + let _ = Balances::make_free_balance_be(&555, 2000); + let _ = Balances::make_free_balance_be(&777, 2000); + + assert_ok!(Staking::bond(RuntimeOrigin::signed(333), 10, RewardDestination::Staked)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(444), 20, RewardDestination::Staked)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(555), 20, RewardDestination::Staked)); // not relevant to the test case, but ensures try-runtime checks pass. - [1, 2, 3] + [333, 444, 555] .iter() .for_each(|s| Payee::::insert(s, RewardDestination::Staked)); // we want to test the case where a controller can also be a stash of another ledger. // for that, we change the controller/stash bonding so that: - // * 2 becomes controller of 1. - // * 3 becomes controller of 2. - // * 4 becomes controller of 3. - let ledger_1 = Ledger::::get(1).unwrap(); - let ledger_2 = Ledger::::get(2).unwrap(); - let ledger_3 = Ledger::::get(3).unwrap(); - - // 4 becomes controller of 3. - Bonded::::mutate(3, |controller| *controller = Some(4)); - Ledger::::insert(4, ledger_3); - - // 3 becomes controller of 2. - Bonded::::mutate(2, |controller| *controller = Some(3)); - Ledger::::insert(3, ledger_2); - - // 2 becomes controller of 1 - Bonded::::mutate(1, |controller| *controller = Some(2)); - Ledger::::insert(2, ledger_1); - // 1 is not controller anymore. - Ledger::::remove(1); + // * 444 becomes controller of 333. + // * 555 becomes controller of 444. + // * 777 becomes controller of 555. + let ledger_333 = Ledger::::get(333).unwrap(); + let ledger_444 = Ledger::::get(444).unwrap(); + let ledger_555 = Ledger::::get(555).unwrap(); + + // 777 becomes controller of 555. + Bonded::::mutate(555, |controller| *controller = Some(777)); + Ledger::::insert(777, ledger_555); + + // 555 becomes controller of 444. + Bonded::::mutate(444, |controller| *controller = Some(555)); + Ledger::::insert(555, ledger_444); + + // 444 becomes controller of 333. + Bonded::::mutate(333, |controller| *controller = Some(444)); + Ledger::::insert(444, ledger_333); + + // 333 is not controller anymore. + Ledger::::remove(333); // checks. now we have: - // * 3 ledgers - assert_eq!(Ledger::::iter().count(), 3); - // * stash 1 has controller 2. - assert_eq!(Bonded::::get(1), Some(2)); - assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(1)), Some(2)); - assert_eq!(Ledger::::get(2).unwrap().stash, 1); - - // * stash 2 has controller 3. 
- assert_eq!(Bonded::::get(2), Some(3)); - assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(2)), Some(3)); - assert_eq!(Ledger::::get(3).unwrap().stash, 2); - - // * stash 3 has controller 4. - assert_eq!(Bonded::::get(3), Some(4)); - assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(3)), Some(4)); - assert_eq!(Ledger::::get(4).unwrap().stash, 3); + // * +3 ledgers + assert_eq!(Ledger::::iter().count(), 3 + init_ledgers); + + // * stash 333 has controller 444. + assert_eq!(Bonded::::get(333), Some(444)); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(333)), Some(444)); + assert_eq!(Ledger::::get(444).unwrap().stash, 333); + + // * stash 444 has controller 555. + assert_eq!(Bonded::::get(444), Some(555)); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(444)), Some(555)); + assert_eq!(Ledger::::get(555).unwrap().stash, 444); + + // * stash 555 has controller 777. + assert_eq!(Bonded::::get(555), Some(777)); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(555)), Some(777)); + assert_eq!(Ledger::::get(777).unwrap().stash, 555); } #[macro_export] diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index f6cb829002cb..e1a442c3b6de 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -29,8 +29,8 @@ use frame_support::{ dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{ - Currency, Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance, Len, - OnUnbalanced, TryCollect, UnixTime, + Currency, Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance, + InspectLockableCurrency, Len, OnUnbalanced, TryCollect, UnixTime, }, weights::Weight, }; @@ -51,8 +51,8 @@ use sp_staking::{ use crate::{ election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo, BalanceOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing, IndividualExposure, - MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota, PositiveImbalanceOf, - RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs, + LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota, + PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs, }; use super::pallet::*; @@ -85,6 +85,38 @@ impl Pallet { StakingLedger::::paired_account(Stash(stash.clone())) } + /// Inspects and returns the corruption state of a ledger and bond, if any. + /// + /// Note: all operations in this method access directly the `Bonded` and `Ledger` storage maps + /// instead of using the [`StakingLedger`] API since the bond and/or ledger may be corrupted. + pub(crate) fn inspect_bond_state( + stash: &T::AccountId, + ) -> Result> { + let lock = T::Currency::balance_locked(crate::STAKING_ID, &stash); + + let controller = >::get(stash).ok_or_else(|| { + if lock == Zero::zero() { + Error::::NotStash + } else { + Error::::BadState + } + })?; + + match Ledger::::get(controller) { + Some(ledger) => + if ledger.stash != *stash { + Ok(LedgerIntegrityState::Corrupted) + } else { + if lock != ledger.total { + Ok(LedgerIntegrityState::LockCorrupted) + } else { + Ok(LedgerIntegrityState::Ok) + } + }, + None => Ok(LedgerIntegrityState::CorruptedKilled), + } + } + /// The total balance that can be slashed from a stash account as of right now. 
	pub fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf {
		// Weight note: consider making the stake accessible through stash.
@@ -1838,12 +1870,12 @@ impl Pallet {
			"VoterList contains non-staker"
		);

+		Self::check_ledgers()?;
		Self::check_bonded_consistency()?;
		Self::check_payees()?;
		Self::check_nominators()?;
		Self::check_exposures()?;
		Self::check_paged_exposures()?;
-		Self::check_ledgers()?;
		Self::check_count()
	}

@@ -1852,6 +1884,7 @@ impl Pallet {
	/// * A bonded (stash, controller) pair should have only one associated ledger. I.e. if the
	///   ledger is bonded by stash, the controller account must not bond a different ledger.
	/// * A bonded (stash, controller) pair must have an associated ledger.
+	///
	/// NOTE: these checks result in warnings only. Once
	/// is resolved, turn warns into check
	/// failures.
@@ -1907,7 +1940,7 @@ impl Pallet {
	/// Invariants:
	/// * A bonded ledger should always have an assigned `Payee`.
	/// * The number of entries in `Payee` and of bonded staking ledgers *must* match.
-	/// * The stash account in the ledger must match that of the bonded acount.
+	/// * The stash account in the ledger must match that of the bonded account.
	fn check_payees() -> Result<(), TryRuntimeError> {
		for (stash, _) in Bonded::::iter() {
			ensure!(Payee::::get(&stash).is_some(), "bonded ledger does not have payee set");
@@ -1946,19 +1979,18 @@ impl Pallet {
	}

	/// Invariants:
-	/// * `ledger.controller` is not stored in the storage (but populated at retrieval).
	/// * Stake consistency: ledger.total == ledger.active + sum(ledger.unlocking).
-	/// * The controller keyeing the ledger and the ledger stash matches the state of the `Bonded`
-	///   storage.
+	/// * The ledger's controller and stash match the associated `Bonded` tuple.
+	/// * Staking locked funds for every bonded stash should be the same as its ledger's total.
+	/// * Staking ledger and bond are not corrupted.
	fn check_ledgers() -> Result<(), TryRuntimeError> {
		Bonded::::iter()
			.map(|(stash, ctrl)| {
-				// `ledger.controller` is never stored in raw storage.
-				let raw = Ledger::::get(stash).unwrap_or_else(|| {
-					Ledger::::get(ctrl.clone())
-						.expect("try_check: bonded stash/ctrl does not have an associated ledger")
-				});
-				ensure!(raw.controller.is_none(), "raw storage controller should be None");
+				// ensure lock consistency.
+				ensure!(
+					Self::inspect_bond_state(&stash) == Ok(LedgerIntegrityState::Ok),
+					"bond, ledger and/or staking lock inconsistent for a bonded stash."
+				);

				// ensure ledger consistency.
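				// (this covers the stake-consistency invariant listed above: ledger.total must
				// equal ledger.active plus the sum of the unlocking chunks.)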
				Self::ensure_ledger_consistent(ctrl)
diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs
index 3448f7fe3b2a..16c4caa2e42d 100644
--- a/substrate/frame/staking/src/pallet/mod.rs
+++ b/substrate/frame/staking/src/pallet/mod.rs
@@ -26,7 +26,7 @@ use frame_support::{
	pallet_prelude::*,
	traits::{
		Currency, Defensive, DefensiveSaturating, EnsureOrigin, EstimateNextNewSession, Get,
-		LockableCurrency, OnUnbalanced, UnixTime,
+		InspectLockableCurrency, LockableCurrency, OnUnbalanced, UnixTime, WithdrawReasons,
	},
	weights::Weight,
	BoundedVec,
@@ -47,13 +47,13 @@ pub use impls::*;

use crate::{
	slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout,
-	EraRewardPoints, Exposure, ExposurePage, Forcing, MaxNominationsOf, NegativeImbalanceOf,
-	Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface,
-	StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs,
+	EraRewardPoints, Exposure, ExposurePage, Forcing, LedgerIntegrityState, MaxNominationsOf,
+	NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination,
+	SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs,
};

// The speculative number of spans are used as an input of the weight annotation of
-// [`Call::unbond`], as the post dipatch weight may depend on the number of slashing span on the
+// [`Call::unbond`], as the post dispatch weight may depend on the number of slashing spans on the
// account which is not provided as an input. The value set should be conservative but sensible.
pub(crate) const SPECULATIVE_NUM_SPANS: u32 = 32;

@@ -87,10 +87,10 @@ pub mod pallet {
	pub trait Config: frame_system::Config {
		/// The staking balance.
		type Currency: LockableCurrency<
-			Self::AccountId,
-			Moment = BlockNumberFor,
-			Balance = Self::CurrencyBalance,
-		>;
+				Self::AccountId,
+				Moment = BlockNumberFor,
+				Balance = Self::CurrencyBalance,
+			> + InspectLockableCurrency;
		/// Just the `Currency::Balance` type; we have this item to allow us to constrain it to
		/// `From`.
		type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned
@@ -795,6 +795,7 @@ pub mod pallet {
	}

	#[pallet::error]
+	#[derive(PartialEq)]
	pub enum Error {
		/// Not a controller account.
		NotController,
@@ -854,6 +855,8 @@ pub mod pallet {
		BoundNotMet,
		/// Used when attempting to use deprecated controller account logic.
		ControllerDeprecated,
+		/// Cannot restore a ledger.
+		CannotRestoreLedger,
	}

	#[pallet::hooks]
@@ -1133,7 +1136,7 @@ pub mod pallet {
		/// this call results in a complete removal of all the data related to the stash account.
		/// In this case, the `num_slashing_spans` must be larger or equal to the number of
		/// slashing spans associated with the stash account in the [`SlashingSpans`] storage type,
-		/// otherwise the call will fail. The call weight is directly propotional to
+		/// otherwise the call will fail. The call weight is directly proportional to
		/// `num_slashing_spans`.
		///
		/// ## Complexity
@@ -1375,7 +1378,7 @@ pub mod pallet {
			Ok(())
		}

-		/// Increments the ideal number of validators upto maximum of
+		/// Increments the ideal number of validators up to maximum of
		/// `ElectionProviderBase::MaxWinners`.
		///
		/// The dispatch origin must be Root.
@@ -1400,7 +1403,7 @@ pub mod pallet {
			Ok(())
		}

-		/// Scale up the ideal number of validators by a factor upto maximum of
+		/// Scale up the ideal number of validators by a factor up to maximum of
		/// `ElectionProviderBase::MaxWinners`.
		///
		/// The dispatch origin must be Root.
@@ -1979,6 +1982,108 @@ pub mod pallet {

			Ok(Some(T::WeightInfo::deprecate_controller_batch(controllers.len() as u32)).into())
		}
+
+		/// Restores a ledger that is in an inconsistent state.
+		///
+		/// The requirements to restore a ledger are the following:
+		/// * The stash is bonded; or
+		/// * The stash is not bonded but it has a staking lock left behind; or
+		/// * The stash has an associated ledger and its state is inconsistent; or
+		/// * The ledger is not corrupted *but* its staking lock is out of sync.
+		///
+		/// The `maybe_*` input parameters will overwrite the corresponding data and metadata of the
+		/// ledger associated with the stash. If the input parameters are not set, the ledger will
+		/// be reset to values from on-chain state.
+		#[pallet::call_index(29)]
+		#[pallet::weight(T::WeightInfo::restore_ledger())]
+		pub fn restore_ledger(
+			origin: OriginFor,
+			stash: T::AccountId,
+			maybe_controller: Option,
+			maybe_total: Option>,
+			maybe_unlocking: Option>, T::MaxUnlockingChunks>>,
+		) -> DispatchResult {
+			T::AdminOrigin::ensure_origin(origin)?;
+
+			let current_lock = T::Currency::balance_locked(crate::STAKING_ID, &stash);
+			let stash_balance = T::Currency::free_balance(&stash);
+
+			let (new_controller, new_total) = match Self::inspect_bond_state(&stash) {
+				Ok(LedgerIntegrityState::Corrupted) => {
+					let new_controller = maybe_controller.unwrap_or(stash.clone());
+
+					let new_total = if let Some(total) = maybe_total {
+						let new_total = total.min(stash_balance);
+						// enforce lock == ledger.total.
+						T::Currency::set_lock(
+							crate::STAKING_ID,
+							&stash,
+							new_total,
+							WithdrawReasons::all(),
+						);
+						new_total
+					} else {
+						current_lock
+					};
+
+					Ok((new_controller, new_total))
+				},
+				Ok(LedgerIntegrityState::CorruptedKilled) => {
+					if current_lock == Zero::zero() {
+						// this case needs to restore both lock and ledger, so the new total needs
+						// to be given by the caller since there's no way to restore the total
+						// on-chain.
+						ensure!(maybe_total.is_some(), Error::::CannotRestoreLedger);
+						Ok((
+							stash.clone(),
+							maybe_total.expect("total exists as per the check above; qed."),
+						))
+					} else {
+						Ok((stash.clone(), current_lock))
+					}
+				},
+				Ok(LedgerIntegrityState::LockCorrupted) => {
+					// ledger is not corrupted but its locks are out of sync. In this case, we need
+					// to enforce a new ledger.total and staking lock for this stash.
+					let new_total =
+						maybe_total.ok_or(Error::::CannotRestoreLedger)?.min(stash_balance);
+					T::Currency::set_lock(
+						crate::STAKING_ID,
+						&stash,
+						new_total,
+						WithdrawReasons::all(),
+					);
+
+					Ok((stash.clone(), new_total))
+				},
+				Err(Error::::BadState) => {
+					// the stash and ledger do not exist but lock is lingering.
+					T::Currency::remove_lock(crate::STAKING_ID, &stash);
+					ensure!(
+						Self::inspect_bond_state(&stash) == Err(Error::::NotStash),
+						Error::::BadState
+					);
+
+					return Ok(());
+				},
+				Ok(LedgerIntegrityState::Ok) | Err(_) => Err(Error::::CannotRestoreLedger),
+			}?;
+
+			// re-bond stash and controller tuple.
+			Bonded::::insert(&stash, &new_controller);
+
+			// restore ledger state.
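+			// a fresh ledger is rebuilt from the recovered (controller, total) pair; any
+			// unbonding chunks are dropped unless explicitly passed back in via `maybe_unlocking`.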
+ let mut ledger = StakingLedger::::new(stash.clone(), new_total); + ledger.controller = Some(new_controller); + ledger.unlocking = maybe_unlocking.unwrap_or_default(); + ledger.update()?; + + ensure!( + Self::inspect_bond_state(&stash) == Ok(LedgerIntegrityState::Ok), + Error::::BadState + ); + Ok(()) + } } } diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 0a867a36ee5d..8e8084abc83b 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -269,7 +269,7 @@ fn change_controller_works() { assert_eq!(ledger.controller(), Some(stash)); // the raw storage ledger's controller is always `None`. however, we can still fetch the - // correct controller with `ledger.controler()`. + // correct controller with `ledger.controller()`. let raw_ledger = >::get(&stash).unwrap(); assert_eq!(raw_ledger.controller, None); @@ -1256,7 +1256,7 @@ fn bond_extra_controller_bad_state_works() { Bonded::::insert(31, 41); // we confirm that the ledger is in bad state: 31 has 41 as controller and when fetching - // the ledger associated with the controler 41, its stash is 41 (and not 31). + // the ledger associated with the controller 41, its stash is 41 (and not 31). assert_eq!(Ledger::::get(41).unwrap().stash, 41); // if the ledger is in this bad state, the `bond_extra` should fail. @@ -1814,7 +1814,7 @@ fn max_staked_rewards_works() { let total_payout = treasury_payout + validators_payout; // max stakers payout (without max staked rewards cap applied) is larger than the final - // validator rewards. The final payment and remainder should be adjusted by redestributing + // validator rewards. The final payment and remainder should be adjusted by redistributing // the era inflation to apply the cap... assert!(max_stakers_payout > validators_payout); @@ -4685,7 +4685,7 @@ fn bond_during_era_does_not_populate_legacy_claimed_rewards() { } ); - // make sure only era upto history depth is stored + // make sure only era up to history depth is stored let current_era = 99; mock::start_active_era(current_era); bond_validator(13, 1000); @@ -5411,7 +5411,7 @@ mod election_data_provider { Event::SnapshotVotersSizeExceeded { size: 75 } ); - // however, if the election voter size bounds were largers, the snapshot would + // however, if the election voter size bounds were larger, the snapshot would // include the electing voters of 70. 
			let bounds = ElectionBoundsBuilder::default().voters_size(1_000.into()).build();
			assert_eq!(
@@ -6932,40 +6932,43 @@ mod ledger {
			setup_double_bonded_ledgers();

			// Case 1: double bonded but not corrupted:
-			// stash 2 has controller 3:
-			assert_eq!(Bonded::::get(2), Some(3));
-			assert_eq!(Ledger::::get(3).unwrap().stash, 2);
+			// stash 444 has controller 555:
+			assert_eq!(Bonded::::get(444), Some(555));
+			assert_eq!(Ledger::::get(555).unwrap().stash, 444);

-			// stash 2 is also a controller of 1:
-			assert_eq!(Bonded::::get(1), Some(2));
-			assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(1)), Some(2));
-			assert_eq!(Ledger::::get(2).unwrap().stash, 1);
+			// stash 444 is also a controller of 333:
+			assert_eq!(Bonded::::get(333), Some(444));
+			assert_eq!(
+				StakingLedger::::paired_account(StakingAccount::Stash(333)),
+				Some(444)
+			);
+			assert_eq!(Ledger::::get(444).unwrap().stash, 333);

-			// although 2 is double bonded (it is a controller and a stash of different ledgers),
+			// although 444 is double bonded (it is a controller and a stash of different ledgers),
			// we can safely retrieve the ledger and mutate it since the correct ledger is
			// returned.
-			let ledger_result = StakingLedger::::get(StakingAccount::Stash(2));
-			assert_eq!(ledger_result.unwrap().stash, 2); // correct ledger.
+			let ledger_result = StakingLedger::::get(StakingAccount::Stash(444));
+			assert_eq!(ledger_result.unwrap().stash, 444); // correct ledger.

-			let ledger_result = StakingLedger::::get(StakingAccount::Controller(2));
-			assert_eq!(ledger_result.unwrap().stash, 1); // correct ledger.
+			let ledger_result = StakingLedger::::get(StakingAccount::Controller(444));
+			assert_eq!(ledger_result.unwrap().stash, 333); // correct ledger.

-			// fetching ledger 1 by its stash works.
-			let ledger_result = StakingLedger::::get(StakingAccount::Stash(1));
-			assert_eq!(ledger_result.unwrap().stash, 1);
+			// fetching ledger 333 by its stash works.
+			let ledger_result = StakingLedger::::get(StakingAccount::Stash(333));
+			assert_eq!(ledger_result.unwrap().stash, 333);

			// Case 2: corrupted ledger bonding.
			// in this case, we simulate what happens when fetching a ledger by stash returns a
			// ledger with a different stash. when this happens, we return an error instead of the
			// ledger to prevent ledger mutations.
-			let mut ledger = Ledger::::get(2).unwrap();
-			assert_eq!(ledger.stash, 1);
-			ledger.stash = 2;
-			Ledger::::insert(2, ledger);
+			let mut ledger = Ledger::::get(444).unwrap();
+			assert_eq!(ledger.stash, 333);
+			ledger.stash = 444;
+			Ledger::::insert(444, ledger);

-			// now, we are prevented from fetching the ledger by stash from 1. It's associated
-			// controller (2) is now bonding a ledger with a different stash (2, not 1).
+			// now, we are prevented from fetching the ledger by stash 333. Its associated
+			// controller (444) is now bonding a ledger with a different stash (444, not 333).
-			assert!(StakingLedger::::get(StakingAccount::Stash(1)).is_err());
+			assert!(StakingLedger::::get(StakingAccount::Stash(333)).is_err());
		})
	}
@@ -7068,7 +7071,7 @@ mod ledger {

	#[test]
	fn deprecate_controller_batch_works_full_weight() {
-		ExtBuilder::default().build_and_execute(|| {
+		ExtBuilder::default().try_state(false).build_and_execute(|| {
			// Given:

			let start = 1001;
@@ -7252,7 +7255,7 @@ mod ledger {
			let bounded_controllers: BoundedVec<
				_,
				::MaxControllersInDeprecationBatch,
-			> = BoundedVec::try_from(vec![1, 2, 3, 4]).unwrap();
+			> = BoundedVec::try_from(vec![333, 444, 555, 777]).unwrap();

			assert_ok!(Staking::deprecate_controller_batch(
				RuntimeOrigin::root(),
@@ -7275,7 +7278,7 @@ mod ledger {
			let bounded_controllers: BoundedVec<
				_,
				::MaxControllersInDeprecationBatch,
-			> = BoundedVec::try_from(vec![4, 3, 2, 1]).unwrap();
+			> = BoundedVec::try_from(vec![777, 555, 444, 333]).unwrap();

			assert_ok!(Staking::deprecate_controller_batch(
				RuntimeOrigin::root(),
@@ -7295,9 +7298,9 @@ mod ledger {
			setup_double_bonded_ledgers();

			// in this case, setting controller works due to the ordering of the calls.
-			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(1)));
-			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(2)));
-			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(3)));
+			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(333)));
+			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(444)));
+			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(555)));
		})
	}
@@ -7306,17 +7309,400 @@ mod ledger {
		ExtBuilder::default().has_stakers(false).try_state(false).build_and_execute(|| {
			setup_double_bonded_ledgers();

-			// setting the controller of ledger associated with stash 3 fails since its stash is a
+			// setting the controller of ledger associated with stash 555 fails since its stash is a
			// controller of another ledger.
			assert_noop!(
-				Staking::set_controller(RuntimeOrigin::signed(3)),
+				Staking::set_controller(RuntimeOrigin::signed(555)),
				Error::::BadState
			);
			assert_noop!(
-				Staking::set_controller(RuntimeOrigin::signed(2)),
+				Staking::set_controller(RuntimeOrigin::signed(444)),
				Error::::BadState
			);
-			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(1)));
+			assert_ok!(Staking::set_controller(RuntimeOrigin::signed(333)));
+		})
+	}
+}
+
+mod ledger_recovery {
+	use super::*;
+	use frame_support::traits::InspectLockableCurrency;
+
+	#[test]
+	fn inspect_recovery_ledger_simple_works() {
+		ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			// non-corrupted ledger.
+			assert_eq!(Staking::inspect_bond_state(&11).unwrap(), LedgerIntegrityState::Ok);
+
+			// non-bonded stash.
+			assert!(Bonded::::get(&1111).is_none());
+			assert!(Staking::inspect_bond_state(&1111).is_err());
+
+			// double bonded but not corrupted.
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+		})
+	}
+
+	#[test]
+	fn inspect_recovery_ledger_corrupted_killed_works() {
+		ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			let lock_333_before = Balances::balance_locked(crate::STAKING_ID, &333);
+
+			// get into corrupted and killed ledger state by killing a corrupted ledger:
+			// init state:
+			// (333, 444)
+			// (444, 555)
+			// set_controller(444) to 444
+			// (333, 444) -> corrupted
+			// (444, 444)
+			// kill(333)
+			// (444, 444) -> corrupted and None.
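+			// (reading the diagrams above: each line is a (stash, controller) pair as tracked
+			// in `Bonded`; a pair is "corrupted" once the controller's ledger holds a different
+			// stash, and "None" once the ledger entry itself has been removed.)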
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+
+			// now try-state fails.
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+
+			// 333 is corrupted since its controller is linking 444 ledger.
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted);
+			// 444 however is OK.
+			assert_eq!(Staking::inspect_bond_state(&444).unwrap(), LedgerIntegrityState::Ok);
+
+			// kill the corrupted ledger that is associated with stash 333.
+			assert_ok!(StakingLedger::::kill(&333));
+
+			// 333 bond is no more but it returns `BadState` because the lock on this stash is
+			// still set (see checks below).
+			assert_eq!(Staking::inspect_bond_state(&333), Err(Error::::BadState));
+			// now the *other* ledger associated with 444 has been corrupted and killed (None).
+			assert_eq!(
+				Staking::inspect_bond_state(&444),
+				Ok(LedgerIntegrityState::CorruptedKilled)
+			);
+
+			// side effects on 333 - ledger, bonded, payee, lock should be completely empty.
+			// however, 333 lock remains.
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), lock_333_before); // NOK
+			assert!(Bonded::::get(&333).is_none()); // OK
+			assert!(Payee::::get(&333).is_none()); // OK
+			assert!(Ledger::::get(&444).is_none()); // OK
+
+			// side effects on 444 - ledger, bonded, payee, lock should remain intact.
+			// however, 444 lock was removed.
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), 0); // NOK
+			assert!(Bonded::::get(&444).is_some()); // OK
+			assert!(Payee::::get(&444).is_some()); // OK
+			assert!(Ledger::::get(&555).is_none()); // NOK
+
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+		})
+	}
+
+	#[test]
+	fn inspect_recovery_ledger_corrupted_killed_other_works() {
+		ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			let lock_333_before = Balances::balance_locked(crate::STAKING_ID, &333);
+
+			// get into corrupted and killed ledger state by killing a corrupted ledger:
+			// init state:
+			// (333, 444)
+			// (444, 555)
+			// set_controller(444) to 444
+			// (333, 444) -> corrupted
+			// (444, 444)
+			// kill(444)
+			// (333, 444) -> corrupted and None
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+
+			// now try-state fails.
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+
+			// 333 is corrupted since its controller is linking 444 ledger.
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted);
+			// 444 however is OK.
+			assert_eq!(Staking::inspect_bond_state(&444).unwrap(), LedgerIntegrityState::Ok);
+
+			// kill the *other* ledger that is double bonded but not corrupted.
+			assert_ok!(StakingLedger::::kill(&444));
+
+			// now 333 is corrupted and None through the *other* ledger being killed.
+			assert_eq!(
+				Staking::inspect_bond_state(&333).unwrap(),
+				LedgerIntegrityState::CorruptedKilled,
+			);
+			// 444 is cleaned and not a stash anymore; no lock left behind.
+			assert_eq!(Ledger::::get(&444), None);
+			assert_eq!(Staking::inspect_bond_state(&444), Err(Error::::NotStash));
+
+			// side effects on 333 - ledger, bonded, payee, lock should be intact.
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), lock_333_before); // OK
+			assert_eq!(Bonded::::get(&333), Some(444)); // OK
+			assert!(Payee::::get(&333).is_some()); // OK
+			// however, ledger associated with its controller was killed.
+			assert!(Ledger::::get(&444).is_none()); // NOK
+
+			// side effects on 444 - ledger, bonded, payee, lock should be completely removed.
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), 0); // OK
+			assert!(Bonded::::get(&444).is_none()); // OK
+			assert!(Payee::::get(&444).is_none()); // OK
+			assert!(Ledger::::get(&555).is_none()); // OK
+
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+		})
+	}
+
+	#[test]
+	fn inspect_recovery_ledger_lock_corrupted_works() {
+		ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			// get into lock corrupted ledger state by bond_extra on a ledger that is double bonded
+			// with a corrupted ledger.
+			// init state:
+			// (333, 444)
+			// (444, 555)
+			// set_controller(444) to 444
+			// (333, 444) -> corrupted
+			// (444, 444)
+			// bond_extra(333, 10) -> lock corrupted on 444
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+			bond_extra_no_checks(&333, 10);
+
+			// now try-state fails.
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+
+			// 333 is corrupted since its controller is linking 444 ledger.
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted);
+			// 444 ledger is not corrupted but locks got out of sync.
+			assert_eq!(
+				Staking::inspect_bond_state(&444).unwrap(),
+				LedgerIntegrityState::LockCorrupted
+			);
+		})
+	}
+
+	// Corrupted ledger restore.
+	//
+	// * Double bonded and corrupted ledger.
+	#[test]
+	fn restore_ledger_corrupted_works() {
+		ExtBuilder::default().has_stakers(true).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			// get into corrupted ledger state.
+			// init state:
+			// (333, 444)
+			// (444, 555)
+			// set_controller(444) to 444
+			// (333, 444) -> corrupted
+			// (444, 444)
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted);
+
+			// now try-state fails.
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+
+			// recover the ledger bonded by 333 stash.
+			assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None));
+
+			// try-state checks are ok now.
+			assert_ok!(Staking::do_try_state(System::block_number()));
+		})
+	}
+
+	// Corrupted and killed ledger restore.
+	//
+	// * Double bonded and corrupted ledger.
+	// * Ledger killed by own controller.
+	#[test]
+	fn restore_ledger_corrupted_killed_works() {
+		ExtBuilder::default().has_stakers(true).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			// ledger.total == lock
+			let total_444_before_corruption = Balances::balance_locked(crate::STAKING_ID, &444);
+
+			// get into corrupted and killed ledger state by killing a corrupted ledger:
+			// init state:
+			// (333, 444)
+			// (444, 555)
+			// set_controller(444) to 444
+			// (333, 444) -> corrupted
+			// (444, 444)
+			// kill(333)
+			// (444, 444) -> corrupted and None.
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+
+			// kill the corrupted ledger that is associated with stash 333.
+			assert_ok!(StakingLedger::::kill(&333));
+
+			// 333 bond is no more but it returns `BadState` because the lock on this stash is
+			// still set (see checks below).
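+			// (the kill resolved 333's controller, 444, and removed the ledger found there
+			// together with the lock of *that* ledger's stash, leaving 333's own staking lock
+			// behind.)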
+			assert_eq!(Staking::inspect_bond_state(&333), Err(Error::::BadState));
+			// now the *other* ledger associated with 444 has been corrupted and killed (None).
+			assert!(Staking::ledger(StakingAccount::Stash(444)).is_err());
+
+			// try-state should fail.
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+
+			// recover the ledger bonded by 333 stash.
+			assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None));
+
+			// for the try-state checks to pass, we also need to recover the stash 444 which is
+			// corrupted too by proxy of kill(333). Currently, both the lock and the ledger of 444
+			// have been cleared so we need to provide the new amount to restore the ledger.
+			assert_noop!(
+				Staking::restore_ledger(RuntimeOrigin::root(), 444, None, None, None),
+				Error::::CannotRestoreLedger
+			);
+
+			assert_ok!(Staking::restore_ledger(
+				RuntimeOrigin::root(),
+				444,
+				None,
+				Some(total_444_before_corruption),
+				None,
+			));
+
+			// try-state checks are ok now.
+			assert_ok!(Staking::do_try_state(System::block_number()));
+		})
+	}
+
+	// Corrupted and killed by *other* ledger restore.
+	//
+	// * Double bonded and corrupted ledger.
+	// * Ledger killed by own controller.
+	#[test]
+	fn restore_ledger_corrupted_killed_other_works() {
+		ExtBuilder::default().has_stakers(true).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			// get into corrupted and killed ledger state by killing a corrupted ledger:
+			// init state:
+			// (333, 444)
+			// (444, 555)
+			// set_controller(444) to 444
+			// (333, 444) -> corrupted
+			// (444, 444)
+			// kill(444)
+			// (333, 444) -> corrupted and None
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+
+			// now try-state fails.
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+
+			// 333 is corrupted since its controller is linking 444 ledger.
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted);
+			// 444 however is OK.
+			assert_eq!(Staking::inspect_bond_state(&444).unwrap(), LedgerIntegrityState::Ok);
+
+			// kill the *other* ledger that is double bonded but not corrupted.
+			assert_ok!(StakingLedger::::kill(&444));
+
+			// recover the ledger bonded by 333 stash.
+			assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None));
+
+			// 444 does not need recovery in this case since it's been killed successfully.
+			assert_eq!(Staking::inspect_bond_state(&444), Err(Error::::NotStash));
+
+			// try-state checks are ok now.
+			assert_ok!(Staking::do_try_state(System::block_number()));
+		})
+	}
+
+	// Corrupted with bond_extra.
+	//
+	// * Double bonded and corrupted ledger.
+	// * Corrupted ledger calls `bond_extra`.
+	#[test]
+	fn restore_ledger_corrupted_bond_extra_works() {
+		ExtBuilder::default().has_stakers(true).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+
+			let lock_333_before = Balances::balance_locked(crate::STAKING_ID, &333);
+			let lock_444_before = Balances::balance_locked(crate::STAKING_ID, &444);
+
+			// get into a corrupted ledger state with out-of-sync locks by bonding extra on a
+			// corrupted ledger:
+			// init state:
+			// (333, 444)
+			// (444, 555)
+			// set_controller(444) to 444
+			// (333, 444) -> corrupted
+			// (444, 444)
+			// bond_extra(444, 40) -> OK
+			// bond_extra(333, 30) -> locks out of sync
+
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+
+			// now try-state fails.
+			assert!(Staking::do_try_state(System::block_number()).is_err());
+
+			// if 444 bonds extra, the locks remain in sync.
+			bond_extra_no_checks(&444, 40);
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), lock_333_before);
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), lock_444_before + 40);
+
+			// however, if 333 bonds extra, the wrong lock is updated.
+			bond_extra_no_checks(&333, 30);
+			assert_eq!(
+				Balances::balance_locked(crate::STAKING_ID, &333),
+				lock_444_before + 40 + 30
+			); // not OK
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), lock_444_before + 40); // OK
+
+			// recover the ledger bonded by 333 stash. Note that the total/lock needs to be
+			// re-written since on-chain data lock has become out of sync.
+			assert_ok!(Staking::restore_ledger(
+				RuntimeOrigin::root(),
+				333,
+				None,
+				Some(lock_333_before + 30),
+				None
+			));
+
+			// now recover 444: although it is not corrupted, its lock and ledger.total are out
+			// of sync. In this case, we need to explicitly set the ledger's lock and amount,
+			// otherwise the ledger recovery will fail.
+			assert_noop!(
+				Staking::restore_ledger(RuntimeOrigin::root(), 444, None, None, None),
+				Error::::CannotRestoreLedger
+			);
+
+			// and enforcing a new ledger lock/total on this non-corrupted ledger will work.
+			assert_ok!(Staking::restore_ledger(
+				RuntimeOrigin::root(),
+				444,
+				None,
+				Some(lock_444_before + 40),
+				None
+			));
+
+			// double-check that the ledgers reached the expected state and that the bond_extra
+			// done during the corrupted state is part of the recovered ledgers.
+			let ledger_333 = Bonded::::get(&333).and_then(Ledger::::get).unwrap();
+			let ledger_444 = Bonded::::get(&444).and_then(Ledger::::get).unwrap();
+
+			assert_eq!(ledger_333.total, lock_333_before + 30);
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), ledger_333.total);
+			assert_eq!(ledger_444.total, lock_444_before + 40);
+			assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), ledger_444.total);
+
+			// try-state checks are ok now.
+			assert_ok!(Staking::do_try_state(System::block_number()));
		})
	}
}
diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs
index 6f729e08ba5c..8a04a3dfb3f7 100644
--- a/substrate/frame/staking/src/weights.rs
+++ b/substrate/frame/staking/src/weights.rs
@@ -17,10 +17,10 @@

//! Autogenerated weights for `pallet_staking`
//!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2024-01-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-q7z7ruxr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`

// Executed Command:
@@ -80,6 +80,7 @@ pub trait WeightInfo {
	fn chill_other() -> Weight;
	fn force_apply_min_commission() -> Weight;
	fn set_min_commission() -> Weight;
+	fn restore_ledger() -> Weight;
}

/// Weights for `pallet_staking` using the Substrate node and recommended hardware.
@@ -87,21 +88,21 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:0 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `927` + // Measured: `1042` // Estimated: `4764` - // Minimum execution time: 42_042_000 picoseconds. - Weight::from_parts(43_292_000, 4764) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Minimum execution time: 48_753_000 picoseconds. + Weight::from_parts(50_539_000, 4764) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) @@ -120,21 +121,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 85_050_000 picoseconds. - Weight::from_parts(87_567_000, 8877) + // Minimum execution time: 92_701_000 picoseconds. + Weight::from_parts(95_657_000, 8877) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -147,41 +148,43 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 89_076_000 picoseconds. - Weight::from_parts(92_715_000, 8877) + // Minimum execution time: 101_049_000 picoseconds. 
+ Weight::from_parts(103_729_000, 8877) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1115` + // Measured: `1297` // Estimated: `4764` - // Minimum execution time: 42_067_000 picoseconds. - Weight::from_parts(43_239_807, 4764) - // Standard Error: 831 - .saturating_add(Weight::from_parts(46_257, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Minimum execution time: 51_672_000 picoseconds. + Weight::from_parts(53_817_441, 4764) + // Standard Error: 1_124 + .saturating_add(Weight::from_parts(49_168, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Bonded` (r:1 w:1) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -207,10 +210,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_490_000 picoseconds. - Weight::from_parts(95_358_751, 6248) - // Standard Error: 3_952 - .saturating_add(Weight::from_parts(1_294_907, 0).saturating_mul(s.into())) + // Minimum execution time: 92_846_000 picoseconds. 
+ Weight::from_parts(102_158_606, 6248) + // Standard Error: 4_187 + .saturating_add(Weight::from_parts(1_436_364, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -218,6 +221,8 @@ impl WeightInfo for SubstrateWeight { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinValidatorBond` (r:1 w:0) /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MinCommission` (r:1 w:0) @@ -228,8 +233,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:1 w:1) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -242,31 +245,35 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 50_326_000 picoseconds. - Weight::from_parts(52_253_000, 4556) + // Minimum execution time: 58_162_000 picoseconds. + Weight::from_parts(60_124_000, 4556) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:128 w:128) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1280 + k * (569 ±0)` + // Measured: `1815 + k * (572 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 29_305_000 picoseconds. - Weight::from_parts(32_199_604, 4556) - // Standard Error: 7_150 - .saturating_add(Weight::from_parts(6_437_124, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 37_950_000 picoseconds. 
+ Weight::from_parts(34_461_075, 4556) + // Standard Error: 8_013 + .saturating_add(Weight::from_parts(6_696_510, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -277,8 +284,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:2 w:2) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -292,10 +297,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 63_267_000 picoseconds. - Weight::from_parts(61_741_404, 6248) - // Standard Error: 12_955 - .saturating_add(Weight::from_parts(3_811_743, 0).saturating_mul(n.into())) + // Minimum execution time: 70_167_000 picoseconds. + Weight::from_parts(68_024_084, 6248) + // Standard Error: 14_256 + .saturating_add(Weight::from_parts(4_195_757, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -303,6 +308,8 @@ impl WeightInfo for SubstrateWeight { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -317,11 +324,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1650` + // Measured: `1816` // Estimated: `6248` - // Minimum execution time: 52_862_000 picoseconds. - Weight::from_parts(54_108_000, 6248) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Minimum execution time: 61_730_000 picoseconds. 
+ Weight::from_parts(63_430_000, 6248) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) @@ -334,37 +341,37 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_350_000 picoseconds. - Weight::from_parts(16_802_000, 4556) + // Minimum execution time: 20_857_000 picoseconds. + Weight::from_parts(21_615_000, 4556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:1 w:1) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn update_payee() -> Weight { // Proof Size summary in bytes: // Measured: `969` // Estimated: `4556` - // Minimum execution time: 19_981_000 picoseconds. - Weight::from_parts(20_539_000, 4556) + // Minimum execution time: 24_739_000 picoseconds. + Weight::from_parts(25_785_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:2) + /// Storage: `Staking::Ledger` (r:2 w:2) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_controller() -> Weight { // Proof Size summary in bytes: // Measured: `902` - // Estimated: `4556` - // Minimum execution time: 19_304_000 picoseconds. - Weight::from_parts(20_000_000, 4556) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Estimated: `8122` + // Minimum execution time: 24_622_000 picoseconds. + Weight::from_parts(25_220_000, 8122) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Staking::ValidatorCount` (r:0 w:1) @@ -373,8 +380,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_568_000 picoseconds. - Weight::from_parts(2_708_000, 0) + // Minimum execution time: 2_634_000 picoseconds. + Weight::from_parts(2_842_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -383,8 +390,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_950_000 picoseconds. - Weight::from_parts(8_348_000, 0) + // Minimum execution time: 8_496_000 picoseconds. + Weight::from_parts(9_016_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -393,8 +400,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_967_000 picoseconds. - Weight::from_parts(8_222_000, 0) + // Minimum execution time: 8_510_000 picoseconds. 
+ Weight::from_parts(8_893_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -403,8 +410,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_006_000 picoseconds. - Weight::from_parts(8_440_000, 0) + // Minimum execution time: 8_243_000 picoseconds. + Weight::from_parts(8_678_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -414,30 +421,30 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_524_000 picoseconds. - Weight::from_parts(3_123_608, 0) - // Standard Error: 59 - .saturating_add(Weight::from_parts(11_596, 0).saturating_mul(v.into())) + // Minimum execution time: 2_781_000 picoseconds. + Weight::from_parts(3_441_708, 0) + // Standard Error: 58 + .saturating_add(Weight::from_parts(11_811, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Staking::Ledger` (r:5900 w:11800) + /// Storage: `Staking::Ledger` (r:11800 w:11800) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:5900 w:5900) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:5900 w:0) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:0 w:5900) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5900]`. fn deprecate_controller_batch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1356 + i * (151 ±0)` - // Estimated: `990 + i * (3566 ±0)` - // Minimum execution time: 2_092_000 picoseconds. - Weight::from_parts(2_258_000, 990) - // Standard Error: 32_695 - .saturating_add(Weight::from_parts(16_669_219, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into()))) + // Measured: `1746 + i * (229 ±0)` + // Estimated: `990 + i * (7132 ±0)` + // Minimum execution time: 5_331_000 picoseconds. + Weight::from_parts(5_511_000, 990) + // Standard Error: 66_734 + .saturating_add(Weight::from_parts(31_157_413, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(0, 7132).saturating_mul(i.into())) } /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -472,10 +479,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 84_275_000 picoseconds. - Weight::from_parts(92_512_416, 6248) - // Standard Error: 3_633 - .saturating_add(Weight::from_parts(1_315_923, 0).saturating_mul(s.into())) + // Minimum execution time: 89_473_000 picoseconds. 
+ Weight::from_parts(98_055_990, 6248) + // Standard Error: 4_159 + .saturating_add(Weight::from_parts(1_398_203, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -488,10 +495,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 101_707_000 picoseconds. - Weight::from_parts(912_819_462, 70137) - // Standard Error: 57_547 - .saturating_add(Weight::from_parts(4_856_799, 0).saturating_mul(s.into())) + // Minimum execution time: 102_480_000 picoseconds. + Weight::from_parts(1_165_789_820, 70137) + // Standard Error: 77_157 + .saturating_add(Weight::from_parts(6_489_253, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -528,10 +535,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `33297 + n * (377 ±0)` // Estimated: `30944 + n * (3774 ±0)` - // Minimum execution time: 138_657_000 picoseconds. - Weight::from_parts(167_173_445, 30944) - // Standard Error: 25_130 - .saturating_add(Weight::from_parts(44_566_012, 0).saturating_mul(n.into())) + // Minimum execution time: 156_890_000 picoseconds. + Weight::from_parts(202_972_688, 30944) + // Standard Error: 29_972 + .saturating_add(Weight::from_parts(48_226_698, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -555,10 +562,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 80_061_000 picoseconds. - Weight::from_parts(82_836_434, 8877) - // Standard Error: 4_348 - .saturating_add(Weight::from_parts(75_744, 0).saturating_mul(l.into())) + // Minimum execution time: 88_482_000 picoseconds. + Weight::from_parts(92_616_600, 8877) + // Standard Error: 4_411 + .saturating_add(Weight::from_parts(117_722, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -593,10 +600,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 92_560_000 picoseconds. - Weight::from_parts(97_684_741, 6248) - // Standard Error: 3_361 - .saturating_add(Weight::from_parts(1_292_732, 0).saturating_mul(s.into())) + // Minimum execution time: 98_489_000 picoseconds. + Weight::from_parts(102_968_643, 6248) + // Standard Error: 4_823 + .saturating_add(Weight::from_parts(1_420_838, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -642,12 +649,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 564_963_000 picoseconds. 
- Weight::from_parts(569_206_000, 512390) - // Standard Error: 2_033_235 - .saturating_add(Weight::from_parts(68_025_841, 0).saturating_mul(v.into())) - // Standard Error: 202_600 - .saturating_add(Weight::from_parts(17_916_770, 0).saturating_mul(n.into())) + // Minimum execution time: 604_820_000 picoseconds. + Weight::from_parts(608_838_000, 512390) + // Standard Error: 2_300_345 + .saturating_add(Weight::from_parts(72_980_573, 0).saturating_mul(v.into())) + // Standard Error: 229_216 + .saturating_add(Weight::from_parts(20_739_416, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(206_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -678,12 +685,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 32_196_540_000 picoseconds. - Weight::from_parts(32_341_871_000, 512390) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(5_143_440, 0).saturating_mul(v.into())) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(3_328_189, 0).saturating_mul(n.into())) + // Minimum execution time: 37_380_439_000 picoseconds. + Weight::from_parts(38_187_734_000, 512390) + // Standard Error: 425_319 + .saturating_add(Weight::from_parts(6_001_288, 0).saturating_mul(v.into())) + // Standard Error: 425_319 + .saturating_add(Weight::from_parts(4_129_446, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(201_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -700,10 +707,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_381_903_000 picoseconds. - Weight::from_parts(32_693_059, 3510) - // Standard Error: 10_000 - .saturating_add(Weight::from_parts(4_736_173, 0).saturating_mul(v.into())) + // Minimum execution time: 2_572_838_000 picoseconds. + Weight::from_parts(67_632_557, 3510) + // Standard Error: 12_028 + .saturating_add(Weight::from_parts(5_117_459, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -714,6 +721,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -724,9 +733,9 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_434_000 picoseconds. 
- Weight::from_parts(5_742_000, 0) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Minimum execution time: 5_962_000 picoseconds. + Weight::from_parts(6_497_000, 0) + .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -734,6 +743,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -744,9 +755,9 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_588_000 picoseconds. - Weight::from_parts(4_854_000, 0) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Minimum execution time: 5_227_000 picoseconds. + Weight::from_parts(5_496_000, 0) + .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) @@ -774,8 +785,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1939` // Estimated: `6248` - // Minimum execution time: 68_780_000 picoseconds. - Weight::from_parts(71_479_000, 6248) + // Minimum execution time: 75_129_000 picoseconds. + Weight::from_parts(77_498_000, 6248) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -787,8 +798,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_268_000 picoseconds. - Weight::from_parts(12_661_000, 3510) + // Minimum execution time: 13_488_000 picoseconds. + Weight::from_parts(14_183_000, 3510) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -798,31 +809,50 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_071_000 picoseconds. - Weight::from_parts(3_334_000, 0) + // Minimum execution time: 3_368_000 picoseconds. 
+ Weight::from_parts(3_582_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + fn restore_ledger() -> Weight { + // Proof Size summary in bytes: + // Measured: `1047` + // Estimated: `4764` + // Minimum execution time: 44_876_000 picoseconds. + Weight::from_parts(46_353_000, 4764) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } } // For backwards compatibility and tests. impl WeightInfo for () { /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:0 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `927` + // Measured: `1042` // Estimated: `4764` - // Minimum execution time: 42_042_000 picoseconds. - Weight::from_parts(43_292_000, 4764) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Minimum execution time: 48_753_000 picoseconds. + Weight::from_parts(50_539_000, 4764) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) @@ -841,21 +871,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 85_050_000 picoseconds. - Weight::from_parts(87_567_000, 8877) + // Minimum execution time: 92_701_000 picoseconds. 
+ Weight::from_parts(95_657_000, 8877) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -868,41 +898,43 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 89_076_000 picoseconds. - Weight::from_parts(92_715_000, 8877) + // Minimum execution time: 101_049_000 picoseconds. + Weight::from_parts(103_729_000, 8877) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1115` + // Measured: `1297` // Estimated: `4764` - // Minimum execution time: 42_067_000 picoseconds. - Weight::from_parts(43_239_807, 4764) - // Standard Error: 831 - .saturating_add(Weight::from_parts(46_257, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Minimum execution time: 51_672_000 picoseconds. 
+ Weight::from_parts(53_817_441, 4764) + // Standard Error: 1_124 + .saturating_add(Weight::from_parts(49_168, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Bonded` (r:1 w:1) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -928,10 +960,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_490_000 picoseconds. - Weight::from_parts(95_358_751, 6248) - // Standard Error: 3_952 - .saturating_add(Weight::from_parts(1_294_907, 0).saturating_mul(s.into())) + // Minimum execution time: 92_846_000 picoseconds. + Weight::from_parts(102_158_606, 6248) + // Standard Error: 4_187 + .saturating_add(Weight::from_parts(1_436_364, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -939,6 +971,8 @@ impl WeightInfo for () { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinValidatorBond` (r:1 w:0) /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MinCommission` (r:1 w:0) @@ -949,8 +983,6 @@ impl WeightInfo for () { /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:1 w:1) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -963,31 +995,35 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 50_326_000 picoseconds. - Weight::from_parts(52_253_000, 4556) + // Minimum execution time: 58_162_000 picoseconds. 
+ Weight::from_parts(60_124_000, 4556) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:128 w:128) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1280 + k * (569 ±0)` + // Measured: `1815 + k * (572 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 29_305_000 picoseconds. - Weight::from_parts(32_199_604, 4556) - // Standard Error: 7_150 - .saturating_add(Weight::from_parts(6_437_124, 0).saturating_mul(k.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 37_950_000 picoseconds. + Weight::from_parts(34_461_075, 4556) + // Standard Error: 8_013 + .saturating_add(Weight::from_parts(6_696_510, 0).saturating_mul(k.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -998,8 +1034,6 @@ impl WeightInfo for () { /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:2 w:2) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -1013,10 +1047,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 63_267_000 picoseconds. - Weight::from_parts(61_741_404, 6248) - // Standard Error: 12_955 - .saturating_add(Weight::from_parts(3_811_743, 0).saturating_mul(n.into())) + // Minimum execution time: 70_167_000 picoseconds. 
+ Weight::from_parts(68_024_084, 6248) + // Standard Error: 14_256 + .saturating_add(Weight::from_parts(4_195_757, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -1024,6 +1058,8 @@ impl WeightInfo for () { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -1038,11 +1074,11 @@ impl WeightInfo for () { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1650` + // Measured: `1816` // Estimated: `6248` - // Minimum execution time: 52_862_000 picoseconds. - Weight::from_parts(54_108_000, 6248) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Minimum execution time: 61_730_000 picoseconds. + Weight::from_parts(63_430_000, 6248) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) @@ -1055,37 +1091,37 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_350_000 picoseconds. - Weight::from_parts(16_802_000, 4556) + // Minimum execution time: 20_857_000 picoseconds. + Weight::from_parts(21_615_000, 4556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:1 w:1) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn update_payee() -> Weight { // Proof Size summary in bytes: // Measured: `969` // Estimated: `4556` - // Minimum execution time: 19_981_000 picoseconds. - Weight::from_parts(20_539_000, 4556) + // Minimum execution time: 24_739_000 picoseconds. + Weight::from_parts(25_785_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:2) + /// Storage: `Staking::Ledger` (r:2 w:2) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_controller() -> Weight { // Proof Size summary in bytes: // Measured: `902` - // Estimated: `4556` - // Minimum execution time: 19_304_000 picoseconds. 
- Weight::from_parts(20_000_000, 4556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Estimated: `8122` + // Minimum execution time: 24_622_000 picoseconds. + Weight::from_parts(25_220_000, 8122) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Staking::ValidatorCount` (r:0 w:1) @@ -1094,8 +1130,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_568_000 picoseconds. - Weight::from_parts(2_708_000, 0) + // Minimum execution time: 2_634_000 picoseconds. + Weight::from_parts(2_842_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1104,8 +1140,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_950_000 picoseconds. - Weight::from_parts(8_348_000, 0) + // Minimum execution time: 8_496_000 picoseconds. + Weight::from_parts(9_016_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1114,8 +1150,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_967_000 picoseconds. - Weight::from_parts(8_222_000, 0) + // Minimum execution time: 8_510_000 picoseconds. + Weight::from_parts(8_893_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1124,8 +1160,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_006_000 picoseconds. - Weight::from_parts(8_440_000, 0) + // Minimum execution time: 8_243_000 picoseconds. + Weight::from_parts(8_678_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -1135,30 +1171,30 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_524_000 picoseconds. - Weight::from_parts(3_123_608, 0) - // Standard Error: 59 - .saturating_add(Weight::from_parts(11_596, 0).saturating_mul(v.into())) + // Minimum execution time: 2_781_000 picoseconds. + Weight::from_parts(3_441_708, 0) + // Standard Error: 58 + .saturating_add(Weight::from_parts(11_811, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Staking::Ledger` (r:5900 w:11800) + /// Storage: `Staking::Ledger` (r:11800 w:11800) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:5900 w:5900) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:5900 w:0) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:0 w:5900) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5900]`. fn deprecate_controller_batch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1356 + i * (151 ±0)` - // Estimated: `990 + i * (3566 ±0)` - // Minimum execution time: 2_092_000 picoseconds. 
- Weight::from_parts(2_258_000, 990) - // Standard Error: 32_695 - .saturating_add(Weight::from_parts(16_669_219, 0).saturating_mul(i.into())) - .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(i.into()))) + // Measured: `1746 + i * (229 ±0)` + // Estimated: `990 + i * (7132 ±0)` + // Minimum execution time: 5_331_000 picoseconds. + Weight::from_parts(5_511_000, 990) + // Standard Error: 66_734 + .saturating_add(Weight::from_parts(31_157_413, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(0, 7132).saturating_mul(i.into())) } /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -1193,10 +1229,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 84_275_000 picoseconds. - Weight::from_parts(92_512_416, 6248) - // Standard Error: 3_633 - .saturating_add(Weight::from_parts(1_315_923, 0).saturating_mul(s.into())) + // Minimum execution time: 89_473_000 picoseconds. + Weight::from_parts(98_055_990, 6248) + // Standard Error: 4_159 + .saturating_add(Weight::from_parts(1_398_203, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1209,10 +1245,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 101_707_000 picoseconds. - Weight::from_parts(912_819_462, 70137) - // Standard Error: 57_547 - .saturating_add(Weight::from_parts(4_856_799, 0).saturating_mul(s.into())) + // Minimum execution time: 102_480_000 picoseconds. + Weight::from_parts(1_165_789_820, 70137) + // Standard Error: 77_157 + .saturating_add(Weight::from_parts(6_489_253, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1249,10 +1285,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `33297 + n * (377 ±0)` // Estimated: `30944 + n * (3774 ±0)` - // Minimum execution time: 138_657_000 picoseconds. - Weight::from_parts(167_173_445, 30944) - // Standard Error: 25_130 - .saturating_add(Weight::from_parts(44_566_012, 0).saturating_mul(n.into())) + // Minimum execution time: 156_890_000 picoseconds. + Weight::from_parts(202_972_688, 30944) + // Standard Error: 29_972 + .saturating_add(Weight::from_parts(48_226_698, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -1276,10 +1312,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 80_061_000 picoseconds. - Weight::from_parts(82_836_434, 8877) - // Standard Error: 4_348 - .saturating_add(Weight::from_parts(75_744, 0).saturating_mul(l.into())) + // Minimum execution time: 88_482_000 picoseconds. 
+ Weight::from_parts(92_616_600, 8877) + // Standard Error: 4_411 + .saturating_add(Weight::from_parts(117_722, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -1314,10 +1350,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 92_560_000 picoseconds. - Weight::from_parts(97_684_741, 6248) - // Standard Error: 3_361 - .saturating_add(Weight::from_parts(1_292_732, 0).saturating_mul(s.into())) + // Minimum execution time: 98_489_000 picoseconds. + Weight::from_parts(102_968_643, 6248) + // Standard Error: 4_823 + .saturating_add(Weight::from_parts(1_420_838, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1363,12 +1399,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 564_963_000 picoseconds. - Weight::from_parts(569_206_000, 512390) - // Standard Error: 2_033_235 - .saturating_add(Weight::from_parts(68_025_841, 0).saturating_mul(v.into())) - // Standard Error: 202_600 - .saturating_add(Weight::from_parts(17_916_770, 0).saturating_mul(n.into())) + // Minimum execution time: 604_820_000 picoseconds. + Weight::from_parts(608_838_000, 512390) + // Standard Error: 2_300_345 + .saturating_add(Weight::from_parts(72_980_573, 0).saturating_mul(v.into())) + // Standard Error: 229_216 + .saturating_add(Weight::from_parts(20_739_416, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(206_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1399,12 +1435,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 32_196_540_000 picoseconds. - Weight::from_parts(32_341_871_000, 512390) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(5_143_440, 0).saturating_mul(v.into())) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(3_328_189, 0).saturating_mul(n.into())) + // Minimum execution time: 37_380_439_000 picoseconds. + Weight::from_parts(38_187_734_000, 512390) + // Standard Error: 425_319 + .saturating_add(Weight::from_parts(6_001_288, 0).saturating_mul(v.into())) + // Standard Error: 425_319 + .saturating_add(Weight::from_parts(4_129_446, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(201_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1421,10 +1457,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_381_903_000 picoseconds. - Weight::from_parts(32_693_059, 3510) - // Standard Error: 10_000 - .saturating_add(Weight::from_parts(4_736_173, 0).saturating_mul(v.into())) + // Minimum execution time: 2_572_838_000 picoseconds. 
+ Weight::from_parts(67_632_557, 3510) + // Standard Error: 12_028 + .saturating_add(Weight::from_parts(5_117_459, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -1435,6 +1471,8 @@ impl WeightInfo for () { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -1445,9 +1483,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_434_000 picoseconds. - Weight::from_parts(5_742_000, 0) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Minimum execution time: 5_962_000 picoseconds. + Weight::from_parts(6_497_000, 0) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -1455,6 +1493,8 @@ impl WeightInfo for () { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -1465,9 +1505,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_588_000 picoseconds. - Weight::from_parts(4_854_000, 0) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Minimum execution time: 5_227_000 picoseconds. + Weight::from_parts(5_496_000, 0) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) @@ -1495,8 +1535,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1939` // Estimated: `6248` - // Minimum execution time: 68_780_000 picoseconds. - Weight::from_parts(71_479_000, 6248) + // Minimum execution time: 75_129_000 picoseconds. + Weight::from_parts(77_498_000, 6248) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1508,8 +1548,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_268_000 picoseconds. 
- Weight::from_parts(12_661_000, 3510) + // Minimum execution time: 13_488_000 picoseconds. + Weight::from_parts(14_183_000, 3510) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1519,8 +1559,27 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_071_000 picoseconds. - Weight::from_parts(3_334_000, 0) + // Minimum execution time: 3_368_000 picoseconds. + Weight::from_parts(3_582_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + fn restore_ledger() -> Weight { + // Proof Size summary in bytes: + // Measured: `1047` + // Estimated: `4764` + // Minimum execution time: 44_876_000 picoseconds. + Weight::from_parts(46_353_000, 4764) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } } diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml index e3462250930c..417ed2868172 100644 --- a/substrate/frame/state-trie-migration/Cargo.toml +++ b/substrate/frame/state-trie-migration/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } thousands = { version = "0.2.0", optional = true } zstd = { version = "0.12.4", default-features = false, optional = true } diff --git a/substrate/frame/statement/Cargo.toml b/substrate/frame/statement/Cargo.toml index 3aab8e44f871..c26976c5e08f 100644 --- a/substrate/frame/statement/Cargo.toml +++ b/substrate/frame/statement/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-statement-store = { path = "../../primitives/statement-store", default-features = false } diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index c09654fbc4d7..09d32eb15b65 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -18,13 +18,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = 
{ package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] sp-core = { path = "../../primitives/core" } diff --git a/substrate/frame/sudo/src/lib.rs b/substrate/frame/sudo/src/lib.rs index 97d496314d14..d5c01014b1bc 100644 --- a/substrate/frame/sudo/src/lib.rs +++ b/substrate/frame/sudo/src/lib.rs @@ -38,7 +38,7 @@ //! In Substrate blockchains, pallets may contain dispatchable calls that can only be called at //! the system level of the chain (i.e. dispatchables that require a `Root` origin). //! Setting a privileged account, called the _sudo key_, allows you to make such calls as an -//! extrinisic. +//! extrinsic. //! //! Here's an example of a privileged function in another pallet: //! diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 25bfb8d7a2e3..a891a4e89f7d 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -22,7 +22,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", "max-encoded-len", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } frame-metadata = { version = "16.0.0", default-features = false, features = [ @@ -57,7 +57,7 @@ k256 = { version = "0.13.1", default-features = false, features = ["ecdsa"] } environmental = { version = "1.1.4", default-features = false } sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } serde_json = { features = ["alloc"], workspace = true } -docify = "0.2.7" +docify = "0.2.8" static_assertions = "1.1.0" aquamarine = { version = "0.5.0" } diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index dd0688f2ad06..9f8727f7adef 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -derive-syn-parse = "0.1.5" +derive-syn-parse = "0.2.0" Inflector = "0.11.4" cfg-expr = "0.15.5" itertools = "0.10.3" diff --git a/substrate/frame/support/procedural/src/benchmark.rs b/substrate/frame/support/procedural/src/benchmark.rs index de04313763dd..69ddc1e6a29f 100644 --- a/substrate/frame/support/procedural/src/benchmark.rs +++ b/substrate/frame/support/procedural/src/benchmark.rs @@ -40,10 +40,14 @@ mod keywords { custom_keyword!(benchmarks); custom_keyword!(block); custom_keyword!(extra); + custom_keyword!(pov_mode); custom_keyword!(extrinsic_call); custom_keyword!(skip_meta); custom_keyword!(BenchmarkError); custom_keyword!(Result); + custom_keyword!(MaxEncodedLen); + custom_keyword!(Measured); + custom_keyword!(Ignored); pub const BENCHMARK_TOKEN: &str = stringify!(benchmark); pub const BENCHMARKS_TOKEN: &str = stringify!(benchmarks); @@ -73,51 +77,158 @@ struct 
RangeArgs {
 struct BenchmarkAttrs {
 	skip_meta: bool,
 	extra: bool,
+	pov_mode: Option<PovModeAttr>,
 }
 
 /// Represents a single benchmark option
-enum BenchmarkAttrKeyword {
+enum BenchmarkAttr {
 	Extra,
 	SkipMeta,
+	/// How the PoV should be measured.
+	PoV(PovModeAttr),
 }
 
-impl syn::parse::Parse for BenchmarkAttrKeyword {
+impl syn::parse::Parse for PovModeAttr {
+	fn parse(input: ParseStream) -> Result<Self> {
+		let _pov: keywords::pov_mode = input.parse()?;
+		let _eq: Token![=] = input.parse()?;
+		let root = PovEstimationMode::parse(input)?;
+
+		let mut maybe_content = None;
+		let _ = || -> Result<()> {
+			let content;
+			syn::braced!(content in input);
+			maybe_content = Some(content);
+			Ok(())
+		}();
+
+		let per_key = match maybe_content {
+			Some(content) => {
+				let per_key = Punctuated::<PovModeKeyAttr, Token![,]>::parse_terminated(&content)?;
+				per_key.into_iter().collect()
+			},
+			None => Vec::new(),
+		};
+
+		Ok(Self { root, per_key })
+	}
+}
+
+impl syn::parse::Parse for BenchmarkAttr {
 	fn parse(input: ParseStream) -> Result<Self> {
 		let lookahead = input.lookahead1();
 		if lookahead.peek(keywords::extra) {
 			let _extra: keywords::extra = input.parse()?;
-			return Ok(BenchmarkAttrKeyword::Extra)
+			Ok(BenchmarkAttr::Extra)
 		} else if lookahead.peek(keywords::skip_meta) {
 			let _skip_meta: keywords::skip_meta = input.parse()?;
-			return Ok(BenchmarkAttrKeyword::SkipMeta)
+			Ok(BenchmarkAttr::SkipMeta)
+		} else if lookahead.peek(keywords::pov_mode) {
+			PovModeAttr::parse(input).map(BenchmarkAttr::PoV)
+		} else {
+			Err(lookahead.error())
+		}
+	}
+}
+
+/// A `#[pov_mode = .. { .. }]` attribute.
+#[derive(Debug, Clone)]
+struct PovModeAttr {
+	/// The root mode for these benchmarks.
+	root: PovEstimationMode,
+	/// The pov-mode for a specific key. This overwrites `root` for this key.
+	per_key: Vec<PovModeKeyAttr>,
+}
+
+/// A single key-value pair inside the `{}` of a `#[pov_mode = .. { .. }]` attribute.
+#[derive(Debug, Clone, derive_syn_parse::Parse)]
+struct PovModeKeyAttr {
+	/// A specific storage key for which to set the PoV mode.
+	key: Path,
+	_underscore: Token![:],
+	/// The PoV mode for this key.
+	mode: PovEstimationMode,
+}
+
+/// How the PoV should be estimated.
+#[derive(Debug, Eq, PartialEq, Clone, Copy)]
+pub enum PovEstimationMode {
+	/// Use the maximal encoded length as provided by [`codec::MaxEncodedLen`].
+	MaxEncodedLen,
+	/// Measure the accessed value size in the pallet benchmarking and add some trie overhead.
+	Measured,
+	/// Do not estimate the PoV size for this storage item or benchmark.
+	Ignored,
+}
+
+impl syn::parse::Parse for PovEstimationMode {
+	fn parse(input: ParseStream) -> Result<Self> {
+		let lookahead = input.lookahead1();
+		if lookahead.peek(keywords::MaxEncodedLen) {
+			let _max_encoded_len: keywords::MaxEncodedLen = input.parse()?;
+			return Ok(PovEstimationMode::MaxEncodedLen)
+		} else if lookahead.peek(keywords::Measured) {
+			let _measured: keywords::Measured = input.parse()?;
+			return Ok(PovEstimationMode::Measured)
+		} else if lookahead.peek(keywords::Ignored) {
+			let _ignored: keywords::Ignored = input.parse()?;
+			return Ok(PovEstimationMode::Ignored)
+		} else {
+			return Err(lookahead.error())
+		}
+	}
+}
+
+impl ToString for PovEstimationMode {
+	fn to_string(&self) -> String {
+		match self {
+			PovEstimationMode::MaxEncodedLen => "MaxEncodedLen".into(),
+			PovEstimationMode::Measured => "Measured".into(),
+			PovEstimationMode::Ignored => "Ignored".into(),
+		}
+	}
+}
+
+impl quote::ToTokens for PovEstimationMode {
+	fn to_tokens(&self, tokens: &mut TokenStream2) {
+		match self {
+			PovEstimationMode::MaxEncodedLen => tokens.extend(quote!(MaxEncodedLen)),
+			PovEstimationMode::Measured => tokens.extend(quote!(Measured)),
+			PovEstimationMode::Ignored => tokens.extend(quote!(Ignored)),
+		}
+	}
+}
+
 impl syn::parse::Parse for BenchmarkAttrs {
 	fn parse(input: ParseStream) -> syn::Result<Self> {
 		let mut extra = false;
 		let mut skip_meta = false;
-		let args = Punctuated::<BenchmarkAttrKeyword, Token![,]>::parse_terminated(&input)?;
+		let mut pov_mode = None;
+		let args = Punctuated::<BenchmarkAttr, Token![,]>::parse_terminated(&input)?;
+
 		for arg in args.into_iter() {
 			match arg {
-				BenchmarkAttrKeyword::Extra => {
+				BenchmarkAttr::Extra => {
 					if extra {
 						return Err(input.error("`extra` can only be specified once"))
 					}
 					extra = true;
 				},
-				BenchmarkAttrKeyword::SkipMeta => {
+				BenchmarkAttr::SkipMeta => {
 					if skip_meta {
 						return Err(input.error("`skip_meta` can only be specified once"))
 					}
 					skip_meta = true;
 				},
+				BenchmarkAttr::PoV(mode) => {
+					if pov_mode.is_some() {
+						return Err(input.error("`pov_mode` can only be specified once"))
+					}
+					pov_mode = Some(mode);
+				},
 			}
 		}
-		Ok(BenchmarkAttrs { extra, skip_meta })
+		Ok(BenchmarkAttrs { extra, skip_meta, pov_mode })
 	}
 }
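Taken together, the parsing code above accepts a `pov_mode` option inside `#[benchmark(..)]`, with an optional braced list of per-key overrides. A minimal sketch of the accepted syntax follows; `MyPallet::MyStorage` and `my_benchmark` are hypothetical placeholders, not names from this patch:

```rust
// Root PoV mode is `Measured`; individual storage keys can override it.
// `MyPallet::MyStorage` is a hypothetical storage path used for illustration.
#[benchmark(pov_mode = Measured {
	// Estimate this key via its `MaxEncodedLen` bound instead of measuring it.
	MyPallet::MyStorage: MaxEncodedLen,
	// Skip PoV estimation for this key entirely.
	MyPallet::MyOtherStorage: Ignored,
})]
fn my_benchmark() {
	#[block]
	{}
}
```

The root mode is recorded under the internal `"ALL"` key by the expansion further down, so per-key entries are only needed where a storage item should deviate from the root.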
@@ -138,7 +249,7 @@ impl BenchmarkCallDef {
 	}
 }
 
-/// Represents a parsed `#[benchmark]` or `#[instance_banchmark]` item.
+/// Represents a parsed `#[benchmark]` or `#[instance_benchmark]` item.
 #[derive(Clone)]
 struct BenchmarkDef {
 	params: Vec<ParamDef>,
@@ -344,6 +455,7 @@ pub fn benchmarks(
 	tokens: TokenStream,
 	instance: bool,
 ) -> syn::Result<TokenStream> {
+	let krate = generate_access_from_frame_or_crate("frame-benchmarking")?;
 	// gather module info
 	let module: ItemMod = syn::parse(tokens)?;
 	let mod_span = module.span();
@@ -354,7 +466,7 @@ pub fn benchmarks(
 	let mod_vis = module.vis;
 	let mod_name = module.ident;
 
-	// consume #[benchmarks] attribute by exclusing it from mod_attrs
+	// consume #[benchmarks] attribute by excluding it from mod_attrs
 	let mod_attrs: Vec<&Attribute> = module
 		.attrs
 		.iter()
@@ -364,6 +476,8 @@
 	let mut benchmark_names: Vec<Ident> = Vec::new();
 	let mut extra_benchmark_names: Vec<Ident> = Vec::new();
 	let mut skip_meta_benchmark_names: Vec<Ident> = Vec::new();
+	// Map benchmarks to PoV modes.
+	let mut pov_modes = Vec::new();
 
 	let (_brace, mut content) = module.content.ok_or(syn::Error::new(mod_span, "Module cannot be empty!"))?;
@@ -400,6 +514,25 @@ pub fn benchmarks(
 			} else if benchmark_attrs.skip_meta {
 				skip_meta_benchmark_names.push(name.clone());
 			}
+
+			if let Some(mode) = benchmark_attrs.pov_mode {
+				let mut modes = Vec::new();
+				// We cannot expand strings here since it is no-std, but syn does not expand bytes.
+				let name = name.to_string();
+				let m = mode.root.to_string();
+				modes.push(quote!(("ALL".as_bytes().to_vec(), #m.as_bytes().to_vec())));
+
+				for attr in mode.per_key.iter() {
+					// syn always puts spaces in quoted paths:
+					let key = attr.key.clone().into_token_stream().to_string().replace(" ", "");
+					let mode = attr.mode.to_string();
+					modes.push(quote!((#key.as_bytes().to_vec(), #mode.as_bytes().to_vec())));
+				}
+
+				pov_modes.push(
+					quote!((#name.as_bytes().to_vec(), #krate::__private::vec![#(#modes),*])),
+				);
+			}
 		}
 
 	// expand benchmark
@@ -419,7 +552,6 @@ pub fn benchmarks(
 		true => quote!(T: Config, I: 'static),
 	};
 
-	let krate = generate_access_from_frame_or_crate("frame-benchmarking")?;
 	let frame_system = generate_access_from_frame_or_crate("frame-system")?;
 
 	// benchmark name variables
@@ -537,6 +669,16 @@ pub fn benchmarks(
 					];
 					all_names.retain(|x| !extra.contains(x));
 				}
+				let pov_modes:
+					#krate::__private::Vec<(
+						#krate::__private::Vec<u8>,
+						#krate::__private::Vec<(
+							#krate::__private::Vec<u8>,
+							#krate::__private::Vec<u8>
+						)>,
+					)> = #krate::__private::vec![
+						#( #pov_modes ),*
+					];
 				all_names.into_iter().map(|benchmark| {
 					let selected_benchmark = match benchmark {
 						#(#selected_benchmark_mappings),*,
 						_ => panic!("all benchmarks should be selectable")
 					};
 					let components = <SelectedBenchmark as #krate::BenchmarkingSetup<#type_use_generics>>::components(&selected_benchmark);
+					let name = benchmark.as_bytes().to_vec();
+					let modes = pov_modes.iter().find(|p| p.0 == name).map(|p| p.1.clone());
+
 					#krate::BenchmarkMetadata {
 						name: benchmark.as_bytes().to_vec(),
 						components,
-						// TODO: Not supported by V2 syntax as of yet.
-						// https://github.com/paritytech/substrate/issues/13132
-						pov_modes: alloc::vec![],
+						pov_modes: modes.unwrap_or_default(),
 					}
 				}).collect::<#krate::__private::Vec<_>>()
 			}
diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs
index 306bea8d49e7..a16bf17a44d6 100644
--- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs
+++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs
@@ -24,7 +24,7 @@
 //! - Implicitly: `System: frame_system`
 //! - Explicitly: `System: frame_system::{Pallet, Call}`
 //!
-//! The `construct_runtime` transitions from the implicit definition to the explict one.
+//! The `construct_runtime` transitions from the implicit definition to the explicit one.
 //! From the explicit state, Substrate expands the pallets with additional information
 //! that is to be included in the runtime metadata. This expansion makes visible some extra
 //! parts of the pallets, mainly the `Error` if defined. The expanded state looks like
@@ -55,7 +55,7 @@
 //! +----------+ +------------------+
 //! ```
 //!
-//! When all pallet parts are implcit, then the `construct_runtime!` macro expands to its final
+//! When all pallet parts are implicit, then the `construct_runtime!` macro expands to its final
 //! state, the `ExplicitExpanded`. Otherwise, all implicit parts are converted to an explicit
 //! expanded part allow the `construct_runtime!` to expand any remaining explicit parts to an
 //! explicit expanded part.
@@ -202,7 +202,7 @@
 //! Similarly to the previous transition, the macro expansion transforms `System:
 //! frame_system::{Pallet, Call}` into `System: frame_system expanded::{Error} ::{Pallet, Call}`.
 //! The `expanded` section adds extra parts that the Substrate would like to expose for each pallet
-//! by default. This is done to expose the approprite types for metadata construction.
+//! by default. This is done to expose the appropriate types for metadata construction.
 //!
 //! This time, instead of calling `tt_default_parts` we are using the `tt_extra_parts` macro.
 //! This macro returns the ` :: expanded { Error }` list of additional parts we would like to
diff --git a/substrate/frame/support/procedural/src/construct_runtime/parse.rs b/substrate/frame/support/procedural/src/construct_runtime/parse.rs
index 88f3f14dc86c..31866c787b0f 100644
--- a/substrate/frame/support/procedural/src/construct_runtime/parse.rs
+++ b/substrate/frame/support/procedural/src/construct_runtime/parse.rs
@@ -322,7 +322,7 @@ impl Parse for PalletDeclaration {
 /// A struct representing a path to a pallet. `PalletPath` is almost identical to the standard
 /// Rust path with a few restrictions:
 /// - No leading colons allowed
-/// - Path segments can only consist of identifers separated by colons
+/// - Path segments can only consist of identifiers separated by colons
 #[derive(Debug, Clone)]
 pub struct PalletPath {
 	pub inner: Path,
 }
@@ -595,7 +595,7 @@ pub struct Pallet {
 	pub is_expanded: bool,
 	/// The name of the pallet, e.g.`System` in `System: frame_system`.
 	pub name: Ident,
-	/// Either automatically infered, or defined (e.g. `MyPallet ... = 3,`).
+	/// Either automatically inferred, or defined (e.g. `MyPallet ... = 3,`).
 	pub index: u8,
 	/// The path of the pallet, e.g. `frame_system` in `System: frame_system`.
 	pub path: PalletPath,
@@ -634,7 +634,7 @@ impl Pallet {
 /// +----------+ +----------+ +------------------+
 /// ```
 enum PalletsConversion {
-	/// Pallets implicitely declare parts.
+	/// Pallets implicitly declare parts.
 	///
 	/// `System: frame_system`.
 	Implicit(Vec<PalletDeclaration>),
@@ -648,7 +648,7 @@ enum PalletsConversion {
 	/// Pallets explicitly declare parts that are fully expanded.
 	///
 	/// This is the end state that contains extra parts included by
-	/// default by Subtrate.
+	/// default by Substrate.
 	///
 	/// `System: frame_system expanded::{Error} ::{Pallet, Call}`
 	///
@@ -660,7 +660,7 @@ enum PalletsConversion {
 ///
 /// Check if all pallet have explicit declaration of their parts, if so then assign index to each
 /// pallet using same rules as rust for fieldless enum. I.e. implicit are assigned number
-/// incrementedly from last explicit or 0.
+/// incrementally from last explicit or 0.
 fn convert_pallets(pallets: Vec<PalletDeclaration>) -> syn::Result<PalletsConversion> {
 	if pallets.iter().any(|pallet| pallet.pallet_parts.is_none()) {
 		return Ok(PalletsConversion::Implicit(pallets))
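As a point of reference for the implicit/explicit distinction and the index-assignment rule described in the doc comments above, a runtime declaration mixing the styles could look like this (pallet names are illustrative only, not taken from this patch):

```rust
construct_runtime!(
	pub enum Runtime {
		// Explicit index.
		System: frame_system = 0,
		// Implicit parts and implicit index: assigned incrementally, here 1.
		Utility: pallet_utility,
		// Explicit parts and an explicit index.
		Balances: pallet_balances::{Pallet, Call, Storage, Event<T>} = 5,
		// Implicit index continues from the last explicit one, here 6.
		Sudo: pallet_sudo,
	}
);
```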
fn convert_pallets(pallets: Vec) -> syn::Result { if pallets.iter().any(|pallet| pallet.pallet_parts.is_none()) { return Ok(PalletsConversion::Implicit(pallets)) diff --git a/substrate/frame/support/procedural/src/derive_impl.rs b/substrate/frame/support/procedural/src/derive_impl.rs index 8740ccd401ab..54755f1163a1 100644 --- a/substrate/frame/support/procedural/src/derive_impl.rs +++ b/substrate/frame/support/procedural/src/derive_impl.rs @@ -279,7 +279,7 @@ fn test_runtime_type_with_doc() { } #[test] -fn test_disambugation_path() { +fn test_disambiguation_path() { let foreign_impl: ItemImpl = parse_quote!(impl SomeTrait for SomeType {}); let default_impl_path: Path = parse_quote!(SomeScope::SomeType); diff --git a/substrate/frame/support/procedural/src/dynamic_params.rs b/substrate/frame/support/procedural/src/dynamic_params.rs index b718ccbc9558..29399a885bc6 100644 --- a/substrate/frame/support/procedural/src/dynamic_params.rs +++ b/substrate/frame/support/procedural/src/dynamic_params.rs @@ -147,8 +147,8 @@ fn ensure_codec_index(attrs: &Vec, span: Span) -> Result<()> { /// Used to inject arguments into the inner `#[dynamic_pallet_params(..)]` attribute. /// -/// This allows the outer `#[dynamic_params(..)]` attribute to specify some arguments that dont need -/// to be repeated every time. +/// This allows the outer `#[dynamic_params(..)]` attribute to specify some arguments that don't +/// need to be repeated every time. struct MacroInjectArgs { runtime_params: syn::Ident, params_pallet: syn::Type, @@ -311,7 +311,7 @@ impl ToTokens for DynamicPalletParamAttr { )* } - impl #scrate::traits::dynamic_params::AggregratedKeyValue for Parameters { + impl #scrate::traits::dynamic_params::AggregatedKeyValue for Parameters { type Key = #key_ident; type Value = #value_ident; @@ -497,7 +497,7 @@ impl ToTokens for DynamicParamAggregatedEnum { #vis enum #params_key_ident { #( #(#attributes)* - #param_names(<#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Key), + #param_names(<#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Key), )* } @@ -515,11 +515,11 @@ impl ToTokens for DynamicParamAggregatedEnum { #vis enum #params_value_ident { #( #(#attributes)* - #param_names(<#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Value), + #param_names(<#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Value), )* } - impl #scrate::traits::dynamic_params::AggregratedKeyValue for #name { + impl #scrate::traits::dynamic_params::AggregatedKeyValue for #name { type Key = #params_key_ident; type Value = #params_value_ident; @@ -536,13 +536,13 @@ impl ToTokens for DynamicParamAggregatedEnum { } #( - impl ::core::convert::From<<#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Key> for #params_key_ident { - fn from(key: <#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Key) -> Self { + impl ::core::convert::From<<#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Key> for #params_key_ident { + fn from(key: <#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Key) -> Self { #params_key_ident::#param_names(key) } } - impl ::core::convert::TryFrom<#params_value_ident> for <#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Value { + impl ::core::convert::TryFrom<#params_value_ident> for <#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Value { type Error = (); fn try_from(value: #params_value_ident) -> Result { diff 
--git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index dee6d522d25c..f22be024d3fe 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -137,7 +137,7 @@ fn counter_prefix(prefix: &str) -> String { /// - `Call` - If the pallet has callable functions /// - `Storage` - If the pallet uses storage /// - `Event` or `Event` (if the event is generic) - If the pallet emits events -/// - `Origin` or `Origin` (if the origin is generic) - If the pallet has instanciable origins +/// - `Origin` or `Origin` (if the origin is generic) - If the pallet has instantiable origins /// - `Config` or `Config` (if the config is generic) - If the pallet builds the genesis /// storage with `GenesisConfig` /// - `Inherent` - If the pallet provides/can check inherents. @@ -166,7 +166,7 @@ fn counter_prefix(prefix: &str) -> String { /// and `Event` are encoded, and to define the ModuleToIndex value. /// /// if `= $n` is not given, then index is resolved in the same way as fieldless enum in Rust -/// (i.e. incrementedly from previous index): +/// (i.e. incrementally from previous index): /// ```nocompile /// pallet1 .. = 2, /// pallet2 .., // Here pallet2 is given index 3 @@ -460,7 +460,7 @@ pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream } /// This attribute can be used to derive a full implementation of a trait based on a local partial -/// impl and an external impl containing defaults that can be overriden in the local impl. +/// impl and an external impl containing defaults that can be overridden in the local impl. /// /// For a full end-to-end example, see [below](#use-case-auto-derive-test-pallet-config-traits). /// @@ -665,9 +665,9 @@ pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream "{}::macro_magic", match generate_access_from_frame_or_crate("frame-support") { Ok(path) => Ok(path), - Err(_) => generate_access_from_frame_or_crate("frame"), + Err(_) => generate_access_from_frame_or_crate("polkadot-sdk-frame"), } - .expect("Failed to find either `frame-support` or `frame` in `Cargo.toml` dependencies.") + .expect("Failed to find either `frame-support` or `polkadot-sdk-frame` in `Cargo.toml` dependencies.") .to_token_stream() .to_string() ) @@ -1181,9 +1181,9 @@ pub fn pallet_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { "{}::macro_magic", match generate_access_from_frame_or_crate("frame-support") { Ok(path) => Ok(path), - Err(_) => generate_access_from_frame_or_crate("frame"), + Err(_) => generate_access_from_frame_or_crate("polkadot-sdk-frame"), } - .expect("Failed to find either `frame-support` or `frame` in `Cargo.toml` dependencies.") + .expect("Failed to find either `frame-support` or `polkadot-sdk-frame` in `Cargo.toml` dependencies.") .to_token_stream() .to_string() ) diff --git a/substrate/frame/support/procedural/src/pallet/parse/config.rs b/substrate/frame/support/procedural/src/pallet/parse/config.rs index fbab92db196c..406072df4b9d 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/config.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/config.rs @@ -275,7 +275,8 @@ fn check_event_type( } /// Check that the path to `frame_system::Config` is valid, this is that the path is just -/// `frame_system::Config` or when using the `frame` crate it is `frame::xyz::frame_system::Config`. 
+/// `frame_system::Config` or when using the `frame` crate it is +/// `polkadot_sdk_frame::xyz::frame_system::Config`. fn has_expected_system_config(path: syn::Path, frame_system: &syn::Path) -> bool { // Check if `frame_system` is actually 'frame_system'. if path.segments.iter().all(|s| s.ident != "frame_system") { @@ -293,7 +294,7 @@ fn has_expected_system_config(path: syn::Path, frame_system: &syn::Path) -> bool // `frame` re-exports it as such. syn::parse2::(quote::quote!(frame_system)).expect("is a valid path; qed"), (_, _) => - // They are either both `frame_system` or both `frame::xyz::frame_system`. + // They are either both `frame_system` or both `polkadot_sdk_frame::xyz::frame_system`. frame_system.clone(), }; @@ -516,14 +517,28 @@ mod tests { #[test] fn has_expected_system_config_works_with_frame() { + let path = syn::parse2::(quote::quote!(frame_system::Config)).unwrap(); + + let frame_system = + syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system)) + .unwrap(); + assert!(has_expected_system_config(path.clone(), &frame_system)); + let frame_system = syn::parse2::(quote::quote!(frame::deps::frame_system)).unwrap(); - let path = syn::parse2::(quote::quote!(frame_system::Config)).unwrap(); assert!(has_expected_system_config(path, &frame_system)); } #[test] fn has_expected_system_config_works_with_frame_full_path() { + let frame_system = + syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system)) + .unwrap(); + let path = + syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system::Config)) + .unwrap(); + assert!(has_expected_system_config(path, &frame_system)); + let frame_system = syn::parse2::(quote::quote!(frame::deps::frame_system)).unwrap(); let path = @@ -533,6 +548,13 @@ mod tests { #[test] fn has_expected_system_config_works_with_other_frame_full_path() { + let frame_system = + syn::parse2::(quote::quote!(polkadot_sdk_frame::xyz::frame_system)).unwrap(); + let path = + syn::parse2::(quote::quote!(polkadot_sdk_frame::xyz::frame_system::Config)) + .unwrap(); + assert!(has_expected_system_config(path, &frame_system)); + let frame_system = syn::parse2::(quote::quote!(frame::xyz::frame_system)).unwrap(); let path = @@ -543,18 +565,21 @@ mod tests { #[test] fn has_expected_system_config_does_not_works_with_mixed_frame_full_path() { let frame_system = - syn::parse2::(quote::quote!(frame::xyz::frame_system)).unwrap(); + syn::parse2::(quote::quote!(polkadot_sdk_frame::xyz::frame_system)).unwrap(); let path = - syn::parse2::(quote::quote!(frame::deps::frame_system::Config)).unwrap(); + syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system::Config)) + .unwrap(); assert!(!has_expected_system_config(path, &frame_system)); } #[test] fn has_expected_system_config_does_not_works_with_other_mixed_frame_full_path() { let frame_system = - syn::parse2::(quote::quote!(frame::deps::frame_system)).unwrap(); + syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system)) + .unwrap(); let path = - syn::parse2::(quote::quote!(frame::xyz::frame_system::Config)).unwrap(); + syn::parse2::(quote::quote!(polkadot_sdk_frame::xyz::frame_system::Config)) + .unwrap(); assert!(!has_expected_system_config(path, &frame_system)); } @@ -562,7 +587,8 @@ mod tests { fn has_expected_system_config_does_not_work_with_frame_full_path_if_not_frame_crate() { let frame_system = syn::parse2::(quote::quote!(frame_system)).unwrap(); let path = - syn::parse2::(quote::quote!(frame::deps::frame_system::Config)).unwrap(); + 
syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system::Config)) + .unwrap(); assert!(!has_expected_system_config(path, &frame_system)); } diff --git a/substrate/frame/support/procedural/src/pallet/parse/mod.rs b/substrate/frame/support/procedural/src/pallet/parse/mod.rs index b55f130a93a7..6e12774611dd 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/mod.rs @@ -321,7 +321,7 @@ impl Def { Ok(()) } - /// Tries to locate a manual tasks impl (an impl impling a trait whose last path segment is + /// Tries to locate a manual tasks impl (an impl implementing a trait whose last path segment is /// `Task`) in the event that one has not been found already via the attribute macro pub fn resolve_manual_tasks_impl( tasks: &mut Option, diff --git a/substrate/frame/support/procedural/tools/src/lib.rs b/substrate/frame/support/procedural/tools/src/lib.rs index b5d08f7c203a..4e824ad4fbb1 100644 --- a/substrate/frame/support/procedural/tools/src/lib.rs +++ b/substrate/frame/support/procedural/tools/src/lib.rs @@ -54,15 +54,19 @@ pub fn generate_crate_access(unique_id: &str, def_crate: &str) -> TokenStream { /// /// This will usually check the output of [`generate_access_from_frame_or_crate`]. /// We want to know if whatever the `path` takes us to, is exported from `frame` or not. In that -/// case `path` would start with `frame`, something like `frame::x::y:z`. +/// case `path` would start with `frame`, something like `polkadot_sdk_frame::x::y:z` or +/// frame::x::y:z. pub fn is_using_frame_crate(path: &syn::Path) -> bool { - path.segments.first().map(|s| s.ident == "frame").unwrap_or(false) + path.segments + .first() + .map(|s| s.ident == "polkadot_sdk_frame" || s.ident == "frame") + .unwrap_or(false) } /// Generate the crate access for the crate using 2018 syntax. /// -/// If `frame` is in scope, it will use `frame::deps::`. Else, it will try and find -/// `` directly. +/// If `frame` is in scope, it will use `polkadot_sdk_frame::deps::`. Else, it will try +/// and find `` directly. pub fn generate_access_from_frame_or_crate(def_crate: &str) -> Result { if let Some(path) = get_frame_crate_path(def_crate) { Ok(path) @@ -117,7 +121,9 @@ pub fn generate_hidden_includes(unique_id: &str, def_crate: &str) -> TokenStream /// Generates the path to the frame crate deps. fn get_frame_crate_path(def_crate: &str) -> Option { // This does not work if the frame crate is renamed. - if let Ok(FoundCrate::Name(name)) = crate_name(&"frame") { + if let Ok(FoundCrate::Name(name)) = + crate_name(&"polkadot-sdk-frame").or_else(|_| crate_name(&"frame")) + { let path = format!("{}::deps::{}", name, def_crate.to_string().replace("-", "_")); Some(syn::parse_str::(&path).expect("is a valid path; qed")) } else { diff --git a/substrate/frame/support/src/dispatch_context.rs b/substrate/frame/support/src/dispatch_context.rs index 40ea3359c973..10bda0b91ab9 100644 --- a/substrate/frame/support/src/dispatch_context.rs +++ b/substrate/frame/support/src/dispatch_context.rs @@ -18,7 +18,7 @@ //! Provides functions to interact with the dispatch context. //! //! A Dispatch context is created by calling [`run_in_context`] and then the given closure will be -//! executed in this dispatch context. Everyting run in this `closure` will have access to the same +//! executed in this dispatch context. Everything run in this `closure` will have access to the same //! dispatch context. This also applies to nested calls of [`run_in_context`]. 
The dispatch context //! can be used to store and retrieve information locally in this context. The dispatch context can //! be accessed by using [`with_context`]. This function will execute the given closure and give it @@ -51,7 +51,7 @@ //! //! run_in_context(|| { //! with_context::(|v| { -//! // Intitialize the value to the default value. +//! // Initialize the value to the default value. //! assert_eq!(0, v.or_default().0); //! v.or_default().0 = 10; //! }); diff --git a/substrate/frame/support/src/instances.rs b/substrate/frame/support/src/instances.rs index 396018d5cbd5..ecb356af50b6 100644 --- a/substrate/frame/support/src/instances.rs +++ b/substrate/frame/support/src/instances.rs @@ -31,83 +31,83 @@ //! NOTE: [`frame_support::pallet`] will reexport them inside the module, in order to make them //! accessible to [`frame_support::construct_runtime`]. -/// `Instance1` to be used for instantiable palllets defined with the +/// `Instance1` to be used for instantiable pallets defined with the /// [`#[pallet]`](`frame_support::pallet`) macro. Instances 2-16 are also available but are hidden /// from docs. #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance1; -/// `Instance2` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance2` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance2; -/// `Instance3` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance3` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance3; -/// `Instance4` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance4` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance4; -/// `Instance5` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance5` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance5; -/// `Instance6` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance6` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance6; -/// `Instance7` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance7` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance7; -/// `Instance8` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance8` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance8; -/// `Instance9` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance9` to be used for instantiable pallets defined with the `#[pallet]` macro. 
#[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance9; -/// `Instance10` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance10` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance10; -/// `Instance11` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance11` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance11; -/// `Instance12` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance12` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance12; -/// `Instance13` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance13` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance13; -/// `Instance14` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance14` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance14; -/// `Instance15` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance15` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance15; -/// `Instance16` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance16` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance16; diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index ecacc76fabdd..5f8bae8ecd6e 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -146,7 +146,7 @@ impl TypeId for PalletId { /// # Examples /// /// There are different ways to declare the `prefix` to use. The `prefix` type can either be -/// declared explicetly by passing it to the macro as an attribute or by letting the macro +/// declared explicitly by passing it to the macro as an attribute or by letting the macro /// guess on what the `prefix` type is. The `prefix` is always passed as the first generic /// argument to the type declaration. When using [`#[pallet::storage]`](pallet_macros::storage) /// this first generic argument is always `_`. Besides declaring the `prefix`, the rest of the @@ -1113,7 +1113,7 @@ pub mod pallet_macros { /// Declares a storage as unbounded in potential size. /// - /// When implementating the storage info (when `#[pallet::generate_storage_info]` is + /// When implementing the storage info (when `#[pallet::generate_storage_info]` is /// specified on the pallet struct placeholder), the size of the storage will be declared /// as unbounded. This can be useful for storage which can never go into PoV (Proof of /// Validity). @@ -2350,7 +2350,7 @@ pub mod pallet_macros { /// Allows defining conditions for a task to run. 
/// - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the conditions for a /// given work item to be valid. /// @@ -2361,7 +2361,7 @@ pub mod pallet_macros { /// Allows defining an index for a task. /// - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the index of a given /// work item. /// @@ -2371,7 +2371,7 @@ pub mod pallet_macros { /// Allows defining an iterator over available work items for a task. /// - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`). /// /// It takes an iterator as input that yields a tuple with same types as the function @@ -2380,7 +2380,7 @@ pub mod pallet_macros { /// Allows defining the weight of a task. /// - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) define the weight of a given work /// item. /// diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index 772a94f3693c..33edaeb56894 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -43,19 +43,19 @@ use sp_runtime::traits::Zero; /// It takes 5 type parameters: /// - `From`: The version being upgraded from. /// - `To`: The version being upgraded to. -/// - `Inner`: An implementation of `OnRuntimeUpgrade`. +/// - `Inner`: An implementation of `UncheckedOnRuntimeUpgrade`. /// - `Pallet`: The Pallet being upgraded. /// - `Weight`: The runtime's RuntimeDbWeight implementation. /// /// When a [`VersionedMigration`] `on_runtime_upgrade`, `pre_upgrade`, or `post_upgrade` method is /// called, the on-chain version of the pallet is compared to `From`. If they match, the `Inner` -/// equivalent is called and the pallets on-chain version is set to `To` after the migration. -/// Otherwise, a warning is logged notifying the developer that the upgrade was a noop and should -/// probably be removed. +/// `UncheckedOnRuntimeUpgrade` is called and the pallet's on-chain version is set to `To` +/// after the migration. Otherwise, a warning is logged notifying the developer that the upgrade was +/// a noop and should probably be removed. /// -/// It is STRONGLY RECOMMENDED to write the unversioned migration logic in a private module and -/// only export the versioned migration logic to prevent accidentally using the unversioned -/// migration in any runtimes. +/// By not bounding `Inner` with `OnRuntimeUpgrade`, we prevent developers from +/// accidentally using the unchecked version of the migration in a runtime upgrade instead of +/// [`VersionedMigration`].
/// /// ### Examples /// ```ignore @@ -73,9 +73,9 @@ use sp_runtime::traits::Zero; /// /// - https://internals.rust-lang.org/t/lang-team-minutes-private-in-public-rules/4504/40 /// mod version_unchecked { /// use super::*; -/// pub struct MigrateV5ToV6(core::marker::PhantomData); -/// impl OnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6 { -/// // OnRuntimeUpgrade implementation... +/// pub struct VersionUncheckedMigrateV5ToV6(core::marker::PhantomData); +/// impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6 { +/// // `UncheckedOnRuntimeUpgrade` implementation... /// } /// } /// @@ -118,7 +118,7 @@ pub enum VersionedPostUpgradeData { impl< const FROM: u16, const TO: u16, - Inner: crate::traits::OnRuntimeUpgrade, + Inner: crate::traits::UncheckedOnRuntimeUpgrade, Pallet: GetStorageVersion + PalletInfoAccess, DbWeight: Get, > crate::traits::OnRuntimeUpgrade for VersionedMigration diff --git a/substrate/frame/support/src/storage/migration.rs b/substrate/frame/support/src/storage/migration.rs index fb05b7f06186..51db68cd47de 100644 --- a/substrate/frame/support/src/storage/migration.rs +++ b/substrate/frame/support/src/storage/migration.rs @@ -304,11 +304,11 @@ pub fn take_storage_item /// Move a storage from a pallet prefix to another pallet prefix. /// /// Keys used in pallet storages always start with: -/// `concat(twox_128(pallet_name), towx_128(storage_name))`. +/// `concat(twox_128(pallet_name), twox_128(storage_name))`. /// /// This function will remove all value for which the key start with -/// `concat(twox_128(old_pallet_name), towx_128(storage_name))` and insert them at the key with -/// the start replaced by `concat(twox_128(new_pallet_name), towx_128(storage_name))`. +/// `concat(twox_128(old_pallet_name), twox_128(storage_name))` and insert them at the key with +/// the start replaced by `concat(twox_128(new_pallet_name), twox_128(storage_name))`. /// /// # Example /// @@ -340,7 +340,7 @@ pub fn move_storage_from_pallet( /// Move all storages from a pallet prefix to another pallet prefix. /// /// Keys used in pallet storages always start with: -/// `concat(twox_128(pallet_name), towx_128(storage_name))`. +/// `concat(twox_128(pallet_name), twox_128(storage_name))`. /// /// This function will remove all value for which the key start with `twox_128(old_pallet_name)` /// and insert them at the key with the start replaced by `twox_128(new_pallet_name)`. diff --git a/substrate/frame/support/src/storage/mod.rs b/substrate/frame/support/src/storage/mod.rs index b5d3165406ae..7fb991d37792 100644 --- a/substrate/frame/support/src/storage/mod.rs +++ b/substrate/frame/support/src/storage/mod.rs @@ -160,7 +160,7 @@ pub trait StorageValue { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. fn decode_len() -> Option where @@ -364,7 +364,7 @@ pub trait StorageMap { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. fn decode_len>(key: KeyArg) -> Option where @@ -382,7 +382,8 @@ pub trait StorageMap { /// /// # Warning /// - /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// - `None` does not mean that `get()` does not return a value. 
The default value is + /// completely /// ignored by this function. /// /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This @@ -411,7 +412,7 @@ pub trait StorageMap { pub trait IterableStorageMap: StorageMap { /// The type that iterates over all `(key, value)`. type Iterator: Iterator; - /// The type that itereates over all `key`s. + /// The type that iterates over all `key`s. type KeyIterator: Iterator; /// Enumerate all elements in the map in lexicographical order of the encoded key. If you @@ -778,7 +779,7 @@ pub trait StorageDoubleMap { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. fn decode_len(key1: KArg1, key2: KArg2) -> Option where @@ -799,7 +800,7 @@ pub trait StorageDoubleMap { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. fn decode_non_dedup_len(key1: KArg1, key2: KArg2) -> Option where @@ -981,7 +982,7 @@ pub trait StorageNMap { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. fn decode_len + TupleToEncodedIter>(key: KArg) -> Option where @@ -1489,8 +1490,8 @@ pub trait StorageDecodeLength: private::Sealed + codec::DecodeLength { } } -/// It is expected that the length is at the beginning of the encoded objectand that the length is a -/// `Compact`. +/// It is expected that the length is at the beginning of the encoded object and that the length is +/// a `Compact`. /// /// # Note /// The length returned by this trait is not deduplicated, i.e. it is the length of the underlying @@ -1791,7 +1792,7 @@ mod test { }); } - // This test ensures that the Digest encoding does not change without being noticied. + // This test ensures that the Digest encoding does not change without being noticed. #[test] fn digest_storage_append_works_as_expected() { TestExternalities::default().execute_with(|| { diff --git a/substrate/frame/support/src/storage/stream_iter.rs b/substrate/frame/support/src/storage/stream_iter.rs index ef93f04b04a1..0d1e5582f841 100644 --- a/substrate/frame/support/src/storage/stream_iter.rs +++ b/substrate/frame/support/src/storage/stream_iter.rs @@ -217,7 +217,7 @@ const STORAGE_INPUT_BUFFER_CAPACITY: usize = 2 * 1024; /// Implementation of [`codec::Input`] using [`sp_io::storage::read`]. /// /// Keeps an internal buffer with a size of [`STORAGE_INPUT_BUFFER_CAPACITY`]. All read accesses -/// are tried to be served by this buffer. If the buffer doesn't hold enough bytes to fullfill the +/// are tried to be served by this buffer. If the buffer doesn't hold enough bytes to fulfill the /// current read access, the buffer is re-filled from the state. A read request that is bigger than /// the internal buffer is directly forwarded to the state to reduce the number of reads from the /// state. 
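The `decode_len` family above relies on SCALE's layout guarantee that a collection's length is a `Compact<u32>` prefix at the start of the encoded object. A small standalone sketch, using the `parity-scale-codec` crate (as `codec`) directly outside any pallet:

```rust
use codec::{Compact, Decode, Encode};

fn main() {
    let stored = vec![10u64, 20, 30].encode();
    // Only the Compact length prefix is decoded; the three `u64` values are
    // never materialised -- this is what `decode_len` exploits.
    let len = Compact::<u32>::decode(&mut &stored[..]).expect("valid compact prefix").0;
    assert_eq!(len, 3);
    // For a missing storage key there are no bytes at all, which is why
    // `decode_len` returns `None` even where `get()` would return a default value.
}
```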
diff --git a/substrate/frame/support/src/storage/types/counted_map.rs b/substrate/frame/support/src/storage/types/counted_map.rs index 4336de94786a..61a10d674138 100644 --- a/substrate/frame/support/src/storage/types/counted_map.rs +++ b/substrate/frame/support/src/storage/types/counted_map.rs @@ -312,7 +312,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len>(key: KeyArg) -> Option where @@ -777,13 +777,13 @@ mod test { assert_eq!(A::try_get(1), Err(())); assert_eq!(A::count(), 3); - // Take exsisting. + // Take existing. assert_eq!(A::take(4), 10); assert_eq!(A::try_get(4), Err(())); assert_eq!(A::count(), 2); - // Take non-exsisting. + // Take non-existing. assert_eq!(A::take(4), ADefault::get()); assert_eq!(A::try_get(4), Err(())); @@ -1024,13 +1024,13 @@ mod test { assert_eq!(B::try_get(1), Err(())); assert_eq!(B::count(), 3); - // Take exsisting. + // Take existing. assert_eq!(B::take(4), Some(10)); assert_eq!(B::try_get(4), Err(())); assert_eq!(B::count(), 2); - // Take non-exsisting. + // Take non-existing. assert_eq!(B::take(4), None); assert_eq!(B::try_get(4), Err(())); diff --git a/substrate/frame/support/src/storage/types/counted_nmap.rs b/substrate/frame/support/src/storage/types/counted_nmap.rs index bdde30d78151..45c096536922 100644 --- a/substrate/frame/support/src/storage/types/counted_nmap.rs +++ b/substrate/frame/support/src/storage/types/counted_nmap.rs @@ -380,7 +380,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len + TupleToEncodedIter>( key: KArg, diff --git a/substrate/frame/support/src/storage/types/double_map.rs b/substrate/frame/support/src/storage/types/double_map.rs index 50d9e04efee5..1e607cb05c8b 100644 --- a/substrate/frame/support/src/storage/types/double_map.rs +++ b/substrate/frame/support/src/storage/types/double_map.rs @@ -447,7 +447,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len(key1: KArg1, key2: KArg2) -> Option where @@ -467,7 +467,8 @@ where /// /// # Warning /// - /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// - `None` does not mean that `get()` does not return a value. The default value is + /// completely /// ignored by this function. /// /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This diff --git a/substrate/frame/support/src/storage/types/map.rs b/substrate/frame/support/src/storage/types/map.rs index 3ca06c9361b4..eef6ed179db4 100644 --- a/substrate/frame/support/src/storage/types/map.rs +++ b/substrate/frame/support/src/storage/types/map.rs @@ -279,7 +279,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. 
pub fn decode_len>(key: KeyArg) -> Option where @@ -297,7 +297,8 @@ where /// /// # Warning /// - /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// - `None` does not mean that `get()` does not return a value. The default value is + /// completely /// ignored by this function. /// /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This diff --git a/substrate/frame/support/src/storage/types/mod.rs b/substrate/frame/support/src/storage/types/mod.rs index 37de4a4a152f..12eef9a1467b 100644 --- a/substrate/frame/support/src/storage/types/mod.rs +++ b/substrate/frame/support/src/storage/types/mod.rs @@ -196,7 +196,7 @@ mod test { // result query returns error assert_eq!(C::get(), Err(())); - // value query with custom onempty returns 42 + // value query with custom on empty returns 42 assert_eq!(D::get(), 42); }); } diff --git a/substrate/frame/support/src/storage/types/nmap.rs b/substrate/frame/support/src/storage/types/nmap.rs index ff542e841de7..43b4dd49d298 100755 --- a/substrate/frame/support/src/storage/types/nmap.rs +++ b/substrate/frame/support/src/storage/types/nmap.rs @@ -350,7 +350,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len + TupleToEncodedIter>( key: KArg, diff --git a/substrate/frame/support/src/storage/types/value.rs b/substrate/frame/support/src/storage/types/value.rs index 76e41b6bb562..05e9ae52e44f 100644 --- a/substrate/frame/support/src/storage/types/value.rs +++ b/substrate/frame/support/src/storage/types/value.rs @@ -227,7 +227,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len() -> Option where @@ -245,7 +245,8 @@ where /// /// # Warning /// - /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// - `None` does not mean that `get()` does not return a value. The default value is + /// completely /// ignored by this function. 
/// /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 1997d8fc223e..66777cef7b8e 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -22,8 +22,8 @@ pub mod tokens; pub use tokens::{ currency::{ - ActiveIssuanceOf, Currency, LockIdentifier, LockableCurrency, NamedReservableCurrency, - ReservableCurrency, TotalIssuanceOf, VestingSchedule, + ActiveIssuanceOf, Currency, InspectLockableCurrency, LockIdentifier, LockableCurrency, + NamedReservableCurrency, ReservableCurrency, TotalIssuanceOf, VestingSchedule, }, fungible, fungibles, imbalance::{Imbalance, OnUnbalanced, SignedImbalance}, @@ -87,7 +87,7 @@ pub use hooks::GenesisBuild; pub use hooks::{ BeforeAllRuntimeMigrations, BuildGenesisConfig, Hooks, IntegrityTest, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnPoll, OnRuntimeUpgrade, OnTimestampSet, PostInherents, - PostTransactions, PreInherents, + PostTransactions, PreInherents, UncheckedOnRuntimeUpgrade, }; pub mod schedule; diff --git a/substrate/frame/support/src/traits/dispatch.rs b/substrate/frame/support/src/traits/dispatch.rs index 09d2445e896c..f97814684dec 100644 --- a/substrate/frame/support/src/traits/dispatch.rs +++ b/substrate/frame/support/src/traits/dispatch.rs @@ -540,7 +540,7 @@ pub trait OriginTrait: Sized { }) } - /// Extract a reference to the sytsem origin, if that's what the caller is. + /// Extract a reference to the system origin, if that's what the caller is. fn as_system_ref(&self) -> Option<&RawOrigin> { self.caller().as_system_ref() } diff --git a/substrate/frame/support/src/traits/dynamic_params.rs b/substrate/frame/support/src/traits/dynamic_params.rs index 97406b99c00c..3ef298fc5a5a 100644 --- a/substrate/frame/support/src/traits/dynamic_params.rs +++ b/substrate/frame/support/src/traits/dynamic_params.rs @@ -25,30 +25,30 @@ use frame_support::Parameter; /// A dynamic parameter store across an aggregated KV type. pub trait RuntimeParameterStore { - type AggregratedKeyValue: AggregratedKeyValue; + type AggregatedKeyValue: AggregatedKeyValue; /// Get the value of a parametrized key. /// /// Should return `None` if no explicit value was set instead of a default. fn get(key: K) -> Option where - KV: AggregratedKeyValue, - K: Key + Into<::Key>, - ::Key: IntoKey< - <::AggregratedKeyValue as AggregratedKeyValue>::Key, + KV: AggregatedKeyValue, + K: Key + Into<::Key>, + ::Key: IntoKey< + <::AggregatedKeyValue as AggregatedKeyValue>::Key, >, - <::AggregratedKeyValue as AggregratedKeyValue>::Value: - TryIntoKey<::Value>, - ::Value: TryInto; + <::AggregatedKeyValue as AggregatedKeyValue>::Value: + TryIntoKey<::Value>, + ::Value: TryInto; } /// A dynamic parameter store across a concrete KV type. -pub trait ParameterStore { +pub trait ParameterStore { /// Get the value of a parametrized key. fn get(key: K) -> Option where - K: Key + Into<::Key>, - ::Value: TryInto; + K: Key + Into<::Key>, + ::Value: TryInto; } /// Key of a dynamic parameter. @@ -61,7 +61,7 @@ pub trait Key { } /// The aggregated key-value type of a dynamic parameter store. -pub trait AggregratedKeyValue: Parameter { +pub trait AggregatedKeyValue: Parameter { /// The aggregated key type. 
type Key: Parameter + MaxEncodedLen; @@ -72,7 +72,7 @@ pub trait AggregratedKeyValue: Parameter { fn into_parts(self) -> (Self::Key, Option<Self::Value>); } -impl AggregratedKeyValue for () { +impl AggregatedKeyValue for () { type Key = (); type Value = (); @@ -90,17 +90,17 @@ pub struct ParameterStoreAdapter<PS, KV>(core::marker::PhantomData<(PS, KV)>); impl<PS, KV> ParameterStore<KV> for ParameterStoreAdapter<PS, KV> where PS: RuntimeParameterStore, - KV: AggregratedKeyValue, - <KV as AggregratedKeyValue>::Key: - IntoKey<<<PS as RuntimeParameterStore>::AggregratedKeyValue as AggregratedKeyValue>::Key>, - <KV as AggregratedKeyValue>::Value: TryFromKey< - <<PS as RuntimeParameterStore>::AggregratedKeyValue as AggregratedKeyValue>::Value, + KV: AggregatedKeyValue, + <KV as AggregatedKeyValue>::Key: + IntoKey<<<PS as RuntimeParameterStore>::AggregatedKeyValue as AggregatedKeyValue>::Key>, + <KV as AggregatedKeyValue>::Value: TryFromKey< + <<PS as RuntimeParameterStore>::AggregatedKeyValue as AggregatedKeyValue>::Value, >, { fn get<K, V>(key: K) -> Option<V> where - K: Key + Into<<KV as AggregratedKeyValue>::Key>, - <KV as AggregratedKeyValue>::Value: TryInto<V>, + K: Key + Into<<KV as AggregatedKeyValue>::Key>, + <KV as AggregatedKeyValue>::Value: TryInto<V>, { PS::get::<KV, K, V>(key) } diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index 30e053ee8640..620c2e47c9fd 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -169,7 +169,7 @@ pub trait OnGenesis { /// /// This hook is intended to be used internally in FRAME and not be exposed to FRAME developers. /// -/// It is defined as a seperate trait from [`OnRuntimeUpgrade`] precisely to not pollute the public +/// It is defined as a separate trait from [`OnRuntimeUpgrade`] precisely to not pollute the public /// API. pub trait BeforeAllRuntimeMigrations { /// Something that should happen before runtime migrations are executed. @@ -229,6 +229,30 @@ pub trait OnRuntimeUpgrade { } } +/// This trait is intended for use within `VersionedMigration` to execute storage migrations without +/// automatic version checks. Implementations should ensure migration logic is safe and idempotent. +pub trait UncheckedOnRuntimeUpgrade { + /// Called within `VersionedMigration` to execute the actual migration. It is also + /// expected that no version checks are performed within this function. + /// + /// See also [`Hooks::on_runtime_upgrade`]. + fn on_runtime_upgrade() -> Weight { + Weight::zero() + } + + /// See [`Hooks::pre_upgrade`]. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> { + Ok(Vec::new()) + } + + /// See [`Hooks::post_upgrade`]. + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> { + Ok(()) + } +} + #[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] @@ -461,7 +485,9 @@ pub trait Hooks<BlockNumber> { /// ## Implementation Note: Standalone Migrations /// /// Additional migrations can be created by directly implementing [`OnRuntimeUpgrade`] on - /// structs and passing them to `Executive`. + /// structs and passing them to `Executive`. Or alternatively, by implementing + /// [`UncheckedOnRuntimeUpgrade`], passing it to [`crate::migrations::VersionedMigration`], + /// which already implements [`OnRuntimeUpgrade`].
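A hedged sketch of how the two pieces are intended to fit together. The pallet name, the `crate::Config` bound and the migration body here are hypothetical, not taken from this diff; only the `VersionedMigration<FROM, TO, Inner, Pallet, DbWeight>` shape and the `UncheckedOnRuntimeUpgrade` trait come from the code above:

```rust
use frame_support::{
    migrations::VersionedMigration, traits::UncheckedOnRuntimeUpgrade, weights::Weight,
};

mod version_unchecked {
    use super::*;
    /// Kept private so only the version-checked wrapper below is exported.
    pub struct MigrateV5ToV6<T>(core::marker::PhantomData<T>);
    impl<T: crate::Config> UncheckedOnRuntimeUpgrade for MigrateV5ToV6<T> {
        fn on_runtime_upgrade() -> Weight {
            // Storage migration logic goes here; `VersionedMigration` performs
            // the `From`/`To` version checks and bumps the version afterwards.
            Weight::zero()
        }
    }
}

/// The exported migration to pass to `Executive`.
pub type MigrateV5ToV6<T> = VersionedMigration<
    5,
    6,
    version_unchecked::MigrateV5ToV6<T>,
    crate::Pallet<T>,
    <T as frame_system::Config>::DbWeight,
>;
```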
/// /// ## Implementation Note: Pallet Versioning /// diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index b49e51c95918..9ca89569dfe9 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -1426,7 +1426,7 @@ mod test { assert_eq!(>::max_encoded_len(), 2usize.pow(14) + 4); let data = 4u64; - // Ensure that we check that the `Vec` is consumed completly on decode. + // Ensure that we check that the `Vec` is consumed completely on decode. assert!(WrapperOpaque::::decode(&mut &data.encode().encode()[..]).is_err()); } diff --git a/substrate/frame/support/src/traits/schedule.rs b/substrate/frame/support/src/traits/schedule.rs index 6669b78209d7..a302e28d4ce2 100644 --- a/substrate/frame/support/src/traits/schedule.rs +++ b/substrate/frame/support/src/traits/schedule.rs @@ -131,7 +131,7 @@ impl MaybeHashed { } } -// TODO: deprecate +#[deprecated(note = "Use `v3` instead. Will be removed after September 2024.")] pub mod v1 { use super::*; @@ -219,10 +219,12 @@ pub mod v1 { fn next_dispatch_time(id: Vec) -> Result; } + #[allow(deprecated)] impl Anon for T where T: v2::Anon, { + #[allow(deprecated)] type Address = T::Address; fn schedule( @@ -233,10 +235,13 @@ pub mod v1 { call: Call, ) -> Result { let c = MaybeHashed::::Value(call); + + #[allow(deprecated)] T::schedule(when, maybe_periodic, priority, origin, c) } fn cancel(address: Self::Address) -> Result<(), ()> { + #[allow(deprecated)] T::cancel(address) } @@ -244,18 +249,22 @@ pub mod v1 { address: Self::Address, when: DispatchTime, ) -> Result { + #[allow(deprecated)] T::reschedule(address, when) } fn next_dispatch_time(address: Self::Address) -> Result { + #[allow(deprecated)] T::next_dispatch_time(address) } } + #[allow(deprecated)] impl Named for T where T: v2::Named, { + #[allow(deprecated)] type Address = T::Address; fn schedule_named( @@ -267,10 +276,12 @@ pub mod v1 { call: Call, ) -> Result { let c = MaybeHashed::::Value(call); + #[allow(deprecated)] T::schedule_named(id, when, maybe_periodic, priority, origin, c) } fn cancel_named(id: Vec) -> Result<(), ()> { + #[allow(deprecated)] T::cancel_named(id) } @@ -278,16 +289,18 @@ pub mod v1 { id: Vec, when: DispatchTime, ) -> Result { + #[allow(deprecated)] T::reschedule_named(id, when) } fn next_dispatch_time(id: Vec) -> Result { + #[allow(deprecated)] T::next_dispatch_time(id) } } } -// TODO: deprecate +#[deprecated(note = "Use `v3` instead. Will be removed after September 2024.")] pub mod v2 { use super::*; @@ -479,4 +492,5 @@ pub mod v3 { } } +#[allow(deprecated)] pub use v1::*; diff --git a/substrate/frame/support/src/traits/tokens/currency.rs b/substrate/frame/support/src/traits/tokens/currency.rs index 8b773115011d..b3db4c98001d 100644 --- a/substrate/frame/support/src/traits/tokens/currency.rs +++ b/substrate/frame/support/src/traits/tokens/currency.rs @@ -16,6 +16,9 @@ // limitations under the License. //! The Currency trait and associated types. +//! +//! Note Currency and related traits are deprecated, instead +//! [`fungible`](frame_support::traits::fungible) traits should be used. 
use super::{ imbalance::{Imbalance, SignedImbalance}, @@ -27,7 +30,7 @@ use sp_runtime::{traits::MaybeSerializeDeserialize, DispatchError}; mod reservable; pub use reservable::{NamedReservableCurrency, ReservableCurrency}; mod lockable; -pub use lockable::{LockIdentifier, LockableCurrency, VestingSchedule}; +pub use lockable::{InspectLockableCurrency, LockIdentifier, LockableCurrency, VestingSchedule}; /// Abstraction over a fungible assets system. pub trait Currency<AccountId> { diff --git a/substrate/frame/support/src/traits/tokens/currency/lockable.rs b/substrate/frame/support/src/traits/tokens/currency/lockable.rs index 955814f5aa9d..51a48dd15ce8 100644 --- a/substrate/frame/support/src/traits/tokens/currency/lockable.rs +++ b/substrate/frame/support/src/traits/tokens/currency/lockable.rs @@ -64,6 +64,12 @@ pub trait LockableCurrency<AccountId>: Currency<AccountId> { fn remove_lock(id: LockIdentifier, who: &AccountId); } +/// An inspect interface for a currency whose accounts can have liquidity restrictions. +pub trait InspectLockableCurrency<AccountId>: LockableCurrency<AccountId> { + /// Amount of funds locked for `who` associated with `id`. + fn balance_locked(id: LockIdentifier, who: &AccountId) -> Self::Balance; +} + /// A vesting schedule over a currency. This allows a particular currency to have vesting limits /// applied to it. pub trait VestingSchedule<AccountId> { diff --git a/substrate/frame/support/src/traits/tokens/fungible/conformance_tests/regular/unbalanced.rs b/substrate/frame/support/src/traits/tokens/fungible/conformance_tests/regular/unbalanced.rs index e7fcc15472e0..a52207368987 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/conformance_tests/regular/unbalanced.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/conformance_tests/regular/unbalanced.rs @@ -94,7 +94,7 @@ where ); assert_eq!(T::balance(&account_0), account_0_initial_balance - amount); - // Decreasing the balance below funds avalibale should fail when Precision::Exact + // Decreasing the balance below funds available should fail when Precision::Exact let balance_before = T::balance(&account_0); assert_eq!( T::decrease_balance( diff --git a/substrate/frame/support/src/traits/tokens/fungible/freeze.rs b/substrate/frame/support/src/traits/tokens/fungible/freeze.rs index 8b542ee4c606..96efbc6ab89e 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/freeze.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/freeze.rs @@ -16,6 +16,9 @@ // limitations under the License. //! The traits for putting freezes within a single fungible token class. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits +//! including the place of the Freezes in FRAME. use scale_info::TypeInfo; use sp_arithmetic::{ @@ -35,7 +38,7 @@ pub trait Inspect<AccountId>: super::Inspect<AccountId> { /// An identifier for a freeze. type Id: codec::Encode + TypeInfo + 'static; - /// Amount of funds held in reserve by `who` for the given `id`. + /// Amount of funds frozen in reserve by `who` for the given `id`. fn balance_frozen(id: &Self::Id, who: &AccountId) -> Self::Balance; /// The amount of the balance which can become frozen. Defaults to `total_balance()`. @@ -45,11 +48,11 @@ pub trait Inspect<AccountId>: super::Inspect<AccountId> { /// Returns `true` if it's possible to introduce a freeze for the given `id` onto the /// account of `who`. This will be true as long as the implementor supports as many - /// concurrent freeze locks as there are possible values of `id`. + /// concurrent freezes as there are possible values of `id`.
fn can_freeze(id: &Self::Id, who: &AccountId) -> bool; } -/// Trait for introducing, altering and removing locks to freeze an account's funds so they never +/// Trait for introducing, altering and removing freezes for an account so that its funds never /// go below a set minimum. pub trait Mutate<AccountId>: Inspect<AccountId> { /// Prevent actions which would reduce the balance of the account of `who` below the given @@ -66,16 +69,16 @@ pub trait Mutate<AccountId>: Inspect<AccountId> { /// counteract any pre-existing freezes in place for `who` under the `id`. Also unlike /// `set_freeze`, in the case that `amount` is zero, this is no-op and never fails. /// - /// Note that more funds can be locked than the total balance, if desired. + /// Note that more funds can be frozen than the total balance, if desired. fn extend_freeze(id: &Self::Id, who: &AccountId, amount: Self::Balance) -> DispatchResult; - /// Remove an existing lock. + /// Remove an existing freeze. fn thaw(id: &Self::Id, who: &AccountId) -> DispatchResult; /// Attempt to alter the amount frozen under the given `id` to `amount`. /// /// Fail if the account of `who` has fewer freezable funds than `amount`, unless `fortitude` is - /// `Fortitude::Force`. + /// [`Fortitude::Force`]. fn set_frozen( id: &Self::Id, who: &AccountId, @@ -91,7 +94,7 @@ pub trait Mutate<AccountId>: Inspect<AccountId> { /// the amount frozen under `id`. Do nothing otherwise. /// /// Fail if the account of `who` has fewer freezable funds than `amount`, unless `fortitude` is - /// `Fortitude::Force`. + /// [`Fortitude::Force`]. fn ensure_frozen( id: &Self::Id, who: &AccountId, diff --git a/substrate/frame/support/src/traits/tokens/fungible/hold.rs b/substrate/frame/support/src/traits/tokens/fungible/hold.rs index 6da652d2998d..28ece25c91d4 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/hold.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/hold.rs @@ -16,6 +16,9 @@ // limitations under the License. //! The traits for putting holds within a single fungible token class. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits +//! including the place of the Holds in FRAME. use crate::{ ensure, @@ -214,8 +217,8 @@ pub trait Mutate<AccountId>: /// /// The actual amount released is returned with `Ok`. /// - /// If `precision` is `BestEffort`, then the amount actually unreserved and returned as the - /// inner value of `Ok` may be smaller than the `amount` passed. + /// If `precision` is [`Precision::BestEffort`], then the amount actually unreserved and + /// returned as the inner value of `Ok` may be smaller than the `amount` passed. /// /// NOTE! The inner of the `Ok` result variant returns the *actual* amount released. This is the /// opposite of the `ReservableCurrency::unreserve()` result, which gives the amount not able diff --git a/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs b/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs index e30f03cc5f07..98af0826c0dd 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs @@ -17,6 +17,8 @@ //! The imbalance type and its associates, which handles keeps everything adding up properly with //! unbalanced operations. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits.
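A sketch of the `release` semantics documented above. `Config`, `BalanceOf`, `T::Balances` and `HoldReason` are assumed runtime plumbing, not part of this diff; only `MutateHold::release` and `Precision::BestEffort` come from the traits shown here:

```rust
use frame_support::traits::{fungible::MutateHold, tokens::Precision};

fn refund<T: Config>(
    who: &T::AccountId,
    amount: BalanceOf<T>,
) -> frame_support::dispatch::DispatchResult {
    // With `Precision::BestEffort`, the inner `Ok` value is the amount actually
    // released, which may be smaller than `amount` -- the opposite convention to
    // `ReservableCurrency::unreserve`, which returns the amount NOT released.
    let released =
        T::Balances::release(&HoldReason::Deposit.into(), who, amount, Precision::BestEffort)?;
    debug_assert!(released <= amount);
    Ok(())
}
```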
use super::{super::Imbalance as ImbalanceT, Balanced, *}; use crate::traits::{ diff --git a/substrate/frame/support/src/traits/tokens/fungible/item_of.rs b/substrate/frame/support/src/traits/tokens/fungible/item_of.rs index 0ad3cb969b34..f6051ab50f70 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/item_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/item_of.rs @@ -16,6 +16,11 @@ // limitations under the License. //! Adapter to use `fungibles::*` implementations as `fungible::*`. +//! +//! This allows for a `fungibles` asset, e.g. from the `pallet_assets` pallet, to be used when a +//! `fungible` asset is expected. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits. use super::*; use crate::traits::{ diff --git a/substrate/frame/support/src/traits/tokens/fungible/mod.rs b/substrate/frame/support/src/traits/tokens/fungible/mod.rs index 2adb11b038b3..4780abf4913e 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/mod.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/mod.rs @@ -17,26 +17,135 @@ //! The traits for dealing with a single fungible token class and any associated types. //! -//! ### User-implememted traits -//! - `Inspect`: Regular balance inspector functions. -//! - `Unbalanced`: Low-level balance mutating functions. Does not guarantee proper book-keeping and -//! so should not be called into directly from application code. Other traits depend on this and -//! provide default implementations based on it. -//! - `UnbalancedHold`: Low-level balance mutating functions for balances placed on hold. Does not +//! Also see the [`frame_tokens`] reference docs for more information about the place of +//! `fungible` traits in Substrate. +//! +//! # Available Traits +//! - [`Inspect`]: Regular balance inspector functions. +//! - [`Unbalanced`]: Low-level balance mutating functions. Does not guarantee proper book-keeping +//! and so should not be called into directly from application code. Other traits depend on this +//! and provide default implementations based on it. +//! - [`UnbalancedHold`]: Low-level balance mutating functions for balances placed on hold. Does not //! guarantee proper book-keeping and so should not be called into directly from application code. //! Other traits depend on this and provide default implementations based on it. -//! - `Mutate`: Regular balance mutator functions. Pre-implemented using `Unbalanced`, though the -//! `done_*` functions should likely be reimplemented in case you want to do something following -//! the operation such as emit events. -//! - `InspectHold`: Inspector functions for balances on hold. -//! - `MutateHold`: Mutator functions for balances on hold. Mostly pre-implemented using -//! `UnbalancedHold`. -//! - `InspectFreeze`: Inspector functions for frozen balance. -//! - `MutateFreeze`: Mutator functions for frozen balance. -//! - `Balanced`: One-sided mutator functions for regular balances, which return imbalance objects +//! - [`Mutate`]: Regular balance mutator functions. Pre-implemented using [`Unbalanced`], though +//! the `done_*` functions should likely be reimplemented in case you want to do something +//! following the operation such as emit events. +//! - [`InspectHold`]: Inspector functions for balances on hold. +//! - [`MutateHold`]: Mutator functions for balances on hold. Mostly pre-implemented using +//! [`UnbalancedHold`]. +//! - [`InspectFreeze`]: Inspector functions for frozen balance. +//!
- [`MutateFreeze`]: Mutator functions for frozen balance. +//! - [`Balanced`]: One-sided mutator functions for regular balances, which return imbalance objects //! which guarantee eventual book-keeping. May be useful for some sophisticated operations where //! funds must be removed from an account before it is known precisely what should be done with //! them. +//! +//! ## Terminology +//! +//! - **Total Issuance**: The total number of units in existence in a system. +//! +//! - **Total Balance**: The sum of an account's free and held balances. +//! +//! - **Free Balance**: A portion of an account's total balance that is not held. Note this is +//! distinct from the Spendable Balance, which represents how much Balance the user can actually +//! transfer. +//! +//! - **Held Balance**: Held balance still belongs to the account holder, but is suspended. It can +//! be slashed, but only after all the free balance has been slashed. +//! +//! Multiple holds stack rather than overlay. This means that if an account has +//! 3 holds for 100 units, the account can spend its funds for any reason down to 300 units, at +//! which point the holds will start to come into play. +//! +//! - **Frozen Balance**: A freeze on a specified amount of an account's free balance until a +//! specified block number. +//! +//! Multiple freezes always operate over the same funds, so they "overlay" rather than +//! "stack". This means that if an account has 3 freezes for 100 units, the account can spend its +//! funds for any reason down to 100 units, at which point the freezes will start to come into +//! play. +//! +//! - **Minimum Balance (a.k.a. Existential Deposit, a.k.a. ED)**: The minimum balance required to +//! create or keep an account open. This is to prevent "dust accounts" from filling storage. When +//! the free plus the held balance (i.e. the total balance) falls below this, then the account is +//! said to be dead. It loses its functionality as well as any prior history and all information +//! on it is removed from the chain's state. No account should ever have a total balance that is +//! strictly between 0 and the existential deposit (exclusive). If this ever happens, it indicates +//! either a bug in the implementation of this trait or an erroneous raw mutation of storage. +//! +//! - **Untouchable Balance**: The part of a user's free balance they cannot spend, due to ED or +//! Freeze(s). +//! +//! - **Spendable Balance**: The part of a user's free balance they can actually transfer, after +//! accounting for Holds and Freezes. +//! +//! - **Imbalance**: A condition when some funds were credited or debited without equal and opposite +//! accounting (i.e. a difference between total issuance and account balances). Functions that +//! result in an imbalance will return an object of the [`imbalance::Credit`] or +//! [`imbalance::Debt`] traits that can be managed within your runtime logic. +//! +//! If an imbalance is simply dropped, it should automatically maintain any book-keeping such as +//! total issuance. +//! +//! ## Visualising Balance Components Together 💫 +//! +//! ```ignore +//! |__total__________________________________| +//! |__on_hold__|_____________free____________| +//! |__________frozen___________| +//! |__on_hold__|__ed__| +//! |__untouchable__|__spendable__| +//! ``` +//! +//! ## Holds and Freezes +//! +//! Both holds and freezes are used to prevent an account from using some of its balance. +//! +//! The primary distinction between the two are that: +//! 
- Holds are cumulative (do not overlap) and are distinct from the free balance +//! - Freezes are not cumulative, and can overlap with each other or with holds +//! +//! ```ignore +//! |__total_____________________________| +//! |__hold_a__|__hold_b__|_____free_____| +//! |__on_hold____________| // <- the sum of all holds +//! |__freeze_a_______________| +//! |__freeze_b____| +//! |__freeze_c________| +//! |__frozen_________________| // <- the max of all freezes +//! ``` +//! +//! Holds are designed to be infallibly slashed, meaning that any logic using a `Freeze` +//! must handle the possibility of the frozen amount being reduced, potentially to zero. A +//! permissionless function should be provided in order to allow bookkeeping to be updated in this +//! instance. E.g. some balance may be frozen when it is used for voting; one could use held balance for +//! voting, but nothing prevents this frozen balance from being reduced if the overlapping hold is +//! slashed. +//! +//! Every Hold and Freeze is accompanied by a unique `Reason`, making it clear for each instance +//! what the originating pallet and purpose is. These reasons are amalgamated into the single enums +//! `RuntimeHoldReason` and `RuntimeFreezeReason`, respectively, when the runtime is compiled. +//! +//! Note that `Hold` and `Freeze` reasons should remain in your runtime for as long as storage +//! could exist in your runtime with those reasons, otherwise your runtime state could become +//! undecodable. +//! +//! ### Should I use a Hold or Freeze? +//! +//! If you require a balance to be infallibly slashed, then you should use Holds. +//! +//! If you require setting a minimum account balance amount, then you should use Freezes. Note +//! Freezes do not carry the same guarantees as Holds. Although the account cannot voluntarily +//! reduce its balance below the largest freeze, if Holds on the account are slashed then the +//! balance could drop below the freeze amount. +//! +//! ## Sets of Tokens +//! +//! For managing sets of tokens, see the [`fungibles`](`frame_support::traits::fungibles`) trait +//! which is a wrapper around this trait but supporting multiple asset instances. +//! +//! [`frame_tokens`]: ../../../../polkadot_sdk_docs/reference_docs/frame_tokens/index.html pub mod conformance_tests; pub mod freeze; diff --git a/substrate/frame/support/src/traits/tokens/fungible/regular.rs b/substrate/frame/support/src/traits/tokens/fungible/regular.rs index 9d8e82087079..fc5cee88af2b 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/regular.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/regular.rs @@ -16,6 +16,8 @@ // limitations under the License. //! `Inspect` and `Mutate` traits for working with regular balances. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits. use crate::{ ensure, diff --git a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs index e968fa2c4cb9..2c7ae51cf8ab 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs @@ -17,6 +17,8 @@ //! Types to combine some `fungible::*` and `fungibles::*` implementations into one union //! `fungibles::*` implementation. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits.
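The hold and freeze semantics documented in the `fungible` module docs above are easiest to see as trait calls. The following is a minimal sketch, not part of this diff: it assumes a typical FRAME pallet context in which `Config`, `BalanceOf<T>`, `T::NativeBalance` (an implementation of the `fungible` traits), and hypothetical `HoldReason::MyPallet` / `FreezeReason::Voting` variants of the pallet's composite reason enums are in scope.

```rust
use frame_support::{
	dispatch::DispatchResult,
	traits::{
		fungible::{MutateFreeze, MutateHold},
		tokens::Precision,
	},
};

/// Place a deposit on hold. Holds *stack*: calling this twice puts
/// `2 * amount` on hold under the same reason.
fn reserve_deposit<T: Config>(who: &T::AccountId, amount: BalanceOf<T>) -> DispatchResult {
	T::NativeBalance::hold(&HoldReason::MyPallet.into(), who, amount)
}

/// Release an exact amount of the held deposit; `Precision::Exact` makes the
/// call fail rather than release less than `amount`.
fn refund_deposit<T: Config>(who: &T::AccountId, amount: BalanceOf<T>) -> DispatchResult {
	T::NativeBalance::release(&HoldReason::MyPallet.into(), who, amount, Precision::Exact)
		.map(|_| ())
}

/// Freezes *overlay*: `set_freeze` sets the frozen amount for this freeze id,
/// and the account's effective frozen balance is the maximum across all ids.
fn freeze_for_voting<T: Config>(who: &T::AccountId, amount: BalanceOf<T>) -> DispatchResult {
	T::NativeBalance::set_freeze(&FreezeReason::Voting.into(), who, amount)
}
```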
use codec::{Decode, Encode, MaxEncodedLen}; use core::cmp::Ordering; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/approvals.rs b/substrate/frame/support/src/traits/tokens/fungibles/approvals.rs index 7a80279b0198..09e3d20756a2 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/approvals.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/approvals.rs @@ -16,6 +16,8 @@ // limitations under the License. //! Inspect and Mutate traits for Asset approvals +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use crate::dispatch::DispatchResult; pub trait Inspect<AccountId>: super::Inspect<AccountId> { diff --git a/substrate/frame/support/src/traits/tokens/fungibles/enumerable.rs b/substrate/frame/support/src/traits/tokens/fungibles/enumerable.rs index 08bb784a7dbf..81dbb93b0b8d 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/enumerable.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/enumerable.rs @@ -15,6 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! Contains an interface for enumerating assets in existence or owned by a given account. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. + /// Interface for enumerating assets in existence or owned by a given account. pub trait Inspect<AccountId>: super::Inspect<AccountId> { type AssetsIterator; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/freeze.rs b/substrate/frame/support/src/traits/tokens/fungibles/freeze.rs index b07d20d6c41c..244f70058994 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/freeze.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/freeze.rs @@ -16,6 +16,8 @@ // limitations under the License. //! The traits for putting freezes within a single fungible token class. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use crate::{ensure, traits::tokens::Fortitude}; use scale_info::TypeInfo; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/hold.rs b/substrate/frame/support/src/traits/tokens/fungibles/hold.rs index 1efd1594213c..ef3fef7a300d 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/hold.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/hold.rs @@ -16,6 +16,8 @@ // limitations under the License. //! The traits for putting holds within a single fungible token class. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use crate::{ ensure, diff --git a/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs b/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs index 280a4515778e..c3b213cc8fc8 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs @@ -17,6 +17,8 @@ //! The imbalance type and its associates, which handles keeps everything adding up properly with //! unbalanced operations. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits.
use super::*; use crate::traits::{ diff --git a/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs b/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs index 0e195a52318b..49f6c846ccdd 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs @@ -16,6 +16,8 @@ // limitations under the License. //! Traits for creating and destroying assets. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use sp_runtime::{DispatchError, DispatchResult}; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs b/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs index a111fd475bcd..241de3677cfc 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs @@ -16,6 +16,8 @@ // limitations under the License. //! Inspect and Mutate traits for Asset metadata +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use crate::dispatch::DispatchResult; #[cfg(not(feature = "std"))] diff --git a/substrate/frame/support/src/traits/tokens/fungibles/mod.rs b/substrate/frame/support/src/traits/tokens/fungibles/mod.rs index 1db0706ba4fd..2122fdeaed3f 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/mod.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/mod.rs @@ -15,7 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! The traits for sets of fungible tokens and any associated types. +//! The traits for *sets* of [`fungible`](`frame_support::traits::fungible`) tokens and any +//! associated types. +//! +//! Individual tokens in the `fungibles` set may be used when a `fungible` trait is expected using +//! [`crate::traits::tokens::fungible::ItemOf`]. +//! +//! Also see the [`frame_tokens`] reference docs for more information about the place of +//! `fungible` traits in Substrate. +//! +//! [`frame_tokens`]: ../../../../polkadot_sdk_docs/reference_docs/frame_tokens/index.html pub mod approvals; mod enumerable; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/regular.rs b/substrate/frame/support/src/traits/tokens/fungibles/regular.rs index a7a9e5759ed7..4c8134ec5b0c 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/regular.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/regular.rs @@ -16,6 +16,8 @@ // limitations under the License. //! `Inspect` and `Mutate` traits for working with regular balances. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use core::marker::PhantomData; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/roles.rs b/substrate/frame/support/src/traits/tokens/fungibles/roles.rs index 5cd1228afbce..4f95ad8368c2 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/roles.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/roles.rs @@ -16,6 +16,8 @@ // limitations under the License. //! Inspect traits for Asset roles +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. pub trait Inspect<AccountId>: super::Inspect<AccountId> { // Get owner for an AssetId.
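As a concrete illustration of the `ItemOf` adapter mentioned in the `fungibles` module docs above, here is a short sketch; it is not part of the diff, and `Assets` (a `pallet_assets` instance with `AssetId = u32`), `AccountId`, and the chosen asset id are all hypothetical.

```rust
use frame_support::{parameter_types, traits::tokens::fungible::ItemOf};

parameter_types! {
	// Hypothetical: the id of one asset inside the `Assets` instance.
	pub const StableAssetId: u32 = 1984;
}

// Pin the multi-asset `Assets` implementation to a single asset id. The
// resulting type implements the single-asset `fungible::*` traits and can be
// plugged in wherever a `fungible` implementation (e.g. `Balances`) is
// expected, such as a pallet's currency-like `Config` type.
pub type StableAsset = ItemOf<Assets, StableAssetId, AccountId>;
```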
diff --git a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs index b6b5be299521..55ace74f1d53 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs @@ -16,6 +16,8 @@ // limitations under the License. //! Type to combine two `fungibles::*` implementations into one union `fungibles::*` implementation. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use frame_support::traits::{ tokens::{ diff --git a/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs b/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs index a3b99cb0b8a5..d79ae562ec67 100644 --- a/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs +++ b/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Means for splitting an imbalance into two and hanlding them differently. +//! Means for splitting an imbalance into two and handling them differently. use super::super::imbalance::{Imbalance, OnUnbalanced}; use core::{marker::PhantomData, ops::Div}; diff --git a/substrate/frame/support/src/traits/tokens/misc.rs b/substrate/frame/support/src/traits/tokens/misc.rs index d7850afe0bc9..b2c1d71dde6e 100644 --- a/substrate/frame/support/src/traits/tokens/misc.rs +++ b/substrate/frame/support/src/traits/tokens/misc.rs @@ -130,7 +130,7 @@ impl<Balance: Zero> WithdrawConsequence<Balance> { pub enum DepositConsequence { /// Deposit couldn't happen due to the amount being too low. This is usually because the /// account doesn't yet exist and the deposit wouldn't bring it to at least the minimum needed - /// for existance. + /// for existence. BelowMinimum, /// Deposit cannot happen since the account cannot be created (usually because it's a consumer /// and there exists no provider reference).
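The `DepositConsequence` variants touched above are typically consumed by translating them into a `DispatchError` before any state is mutated. The helper below is an assumed sketch, not from the diff (the enum itself also ships conversion helpers in `misc.rs`, which may differ in detail):

```rust
use frame_support::traits::tokens::DepositConsequence;
use sp_runtime::{ArithmeticError, DispatchError, TokenError};

// Map a pre-flight deposit check onto an error, keeping `Success` as `Ok(())`.
fn ensure_can_deposit(c: DepositConsequence) -> Result<(), DispatchError> {
	use DepositConsequence::*;
	match c {
		Success => Ok(()),
		BelowMinimum => Err(TokenError::BelowMinimum.into()),
		CannotCreate => Err(TokenError::CannotCreate.into()),
		UnknownAsset => Err(TokenError::UnknownAsset.into()),
		Overflow => Err(ArithmeticError::Overflow.into()),
		Blocked => Err(TokenError::Blocked.into()),
	}
}
```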
diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index b476b676d6b4..7e858ae78202 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] static_assertions = "1.1.0" serde = { features = ["derive"], workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } sp-api = { path = "../../../primitives/api", default-features = false } sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } diff --git a/substrate/frame/support/test/compile_pass/Cargo.toml b/substrate/frame/support/test/compile_pass/Cargo.toml index 0617aa105a21..3f52b4664b18 100644 --- a/substrate/frame/support/test/compile_pass/Cargo.toml +++ b/substrate/frame/support/test/compile_pass/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } renamed-frame-support = { package = "frame-support", path = "../..", default-features = false } renamed-frame-system = { package = "frame-system", path = "../../../system", default-features = false } sp-core = { path = "../../../../primitives/core", default-features = false } diff --git a/substrate/frame/support/test/pallet/Cargo.toml b/substrate/frame/support/test/pallet/Cargo.toml index ca889faef876..7a20c3f27306 100644 --- a/substrate/frame/support/test/pallet/Cargo.toml +++ b/substrate/frame/support/test/pallet/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], workspace = true } frame-support = { path = "../..", default-features = false } frame-system = { path = "../../../system", default-features = false } diff --git a/substrate/frame/support/test/stg_frame_crate/Cargo.toml b/substrate/frame/support/test/stg_frame_crate/Cargo.toml index 632ea4e794f6..554c81ab43de 100644 --- a/substrate/frame/support/test/stg_frame_crate/Cargo.toml +++ b/substrate/frame/support/test/stg_frame_crate/Cargo.toml @@ -16,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -frame = { path = "../../..", default-features = false, features = ["experimental", "runtime"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +frame = { package = "polkadot-sdk-frame", path = "../../..", default-features = false, features = ["experimental", "runtime"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } [features] default = ["std"] diff --git 
a/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_1.rs b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_1.rs new file mode 100644 index 000000000000..40ef884bf857 --- /dev/null +++ b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_1.rs @@ -0,0 +1,31 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_benchmarking::v2::*; + +#[benchmarks] +mod benches { + use super::*; + + #[benchmark(pov_mode = Wrong)] + fn bench() { + #[block] + {} + } +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_1.stderr b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_1.stderr new file mode 100644 index 000000000000..add80da63070 --- /dev/null +++ b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_1.stderr @@ -0,0 +1,5 @@ +error: expected one of: `MaxEncodedLen`, `Measured`, `Ignored` + --> tests/benchmark_ui/bad_attr_pov_mode_1.rs:24:25 + | +24 | #[benchmark(pov_mode = Wrong)] + | ^^^^^ diff --git a/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_2.rs b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_2.rs new file mode 100644 index 000000000000..151bb931e920 --- /dev/null +++ b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_2.rs @@ -0,0 +1,33 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use frame_benchmarking::v2::*; + +#[benchmarks] +mod benches { + use super::*; + + #[benchmark(pov_mode = Measured { + Key: Wrong + })] + fn bench() { + #[block] + {} + } +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_2.stderr b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_2.stderr new file mode 100644 index 000000000000..0f9961afd89f --- /dev/null +++ b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_2.stderr @@ -0,0 +1,5 @@ +error: expected one of: `MaxEncodedLen`, `Measured`, `Ignored` + --> tests/benchmark_ui/bad_attr_pov_mode_2.rs:25:8 + | +25 | Key: Wrong + | ^^^^^ diff --git a/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_3.rs b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_3.rs new file mode 100644 index 000000000000..9c5e3801b1a3 --- /dev/null +++ b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_3.rs @@ -0,0 +1,31 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_benchmarking::v2::*; + +#[benchmarks] +mod benches { + use super::*; + + #[benchmark(pov_mode)] + fn bench() { + #[block] + {} + } +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_3.stderr b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_3.stderr new file mode 100644 index 000000000000..f28a993989a3 --- /dev/null +++ b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_3.stderr @@ -0,0 +1,5 @@ +error: expected `=` + --> tests/benchmark_ui/bad_attr_pov_mode_3.rs:24:22 + | +24 | #[benchmark(pov_mode)] + | ^ diff --git a/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_4.rs b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_4.rs new file mode 100644 index 000000000000..11ec5124d289 --- /dev/null +++ b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_4.rs @@ -0,0 +1,31 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use frame_benchmarking::v2::*; + +#[benchmarks] +mod benches { + use super::*; + + #[benchmark(pov_mode =)] + fn bench() { + #[block] + {} + } +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_4.stderr b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_4.stderr new file mode 100644 index 000000000000..572b6b0815dc --- /dev/null +++ b/substrate/frame/support/test/tests/benchmark_ui/bad_attr_pov_mode_4.stderr @@ -0,0 +1,5 @@ +error: unexpected end of input, expected one of: `MaxEncodedLen`, `Measured`, `Ignored` + --> tests/benchmark_ui/bad_attr_pov_mode_4.rs:24:24 + | +24 | #[benchmark(pov_mode =)] + | ^ diff --git a/substrate/frame/support/test/tests/benchmark_ui/dup_attr_pov_mode.rs b/substrate/frame/support/test/tests/benchmark_ui/dup_attr_pov_mode.rs new file mode 100644 index 000000000000..f49636d181a5 --- /dev/null +++ b/substrate/frame/support/test/tests/benchmark_ui/dup_attr_pov_mode.rs @@ -0,0 +1,32 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_benchmarking::v2::*; +use frame_support_test::Config; + +#[benchmarks] +mod benches { + use super::*; + + #[benchmark(pov_mode = Measured, pov_mode = MaxEncodedLen)] + fn bench() { + #[block] + {} + } +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/benchmark_ui/dup_attr_pov_mode.stderr b/substrate/frame/support/test/tests/benchmark_ui/dup_attr_pov_mode.stderr new file mode 100644 index 000000000000..aab91d271a69 --- /dev/null +++ b/substrate/frame/support/test/tests/benchmark_ui/dup_attr_pov_mode.stderr @@ -0,0 +1,14 @@ +error: unexpected end of input, `pov_mode` can only be specified once + --> tests/benchmark_ui/dup_attr_pov_mode.rs:25:59 + | +25 | #[benchmark(pov_mode = Measured, pov_mode = MaxEncodedLen)] + | ^ + +error: unused import: `frame_support_test::Config` + --> tests/benchmark_ui/dup_attr_pov_mode.rs:19:5 + | +19 | use frame_support_test::Config; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: `-D unused-imports` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_imports)]` diff --git a/substrate/frame/support/test/tests/benchmark_ui/pass/valid_attr_pov_mode.rs b/substrate/frame/support/test/tests/benchmark_ui/pass/valid_attr_pov_mode.rs new file mode 100644 index 000000000000..35fa1e76ae5a --- /dev/null +++ b/substrate/frame/support/test/tests/benchmark_ui/pass/valid_attr_pov_mode.rs @@ -0,0 +1,74 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_benchmarking::v2::*; +use frame_support_test::Config; + +#[benchmarks] +mod benches { + use super::*; + + #[benchmark(skip_meta, extra, pov_mode = Measured)] + fn bench1() { + #[block] + {} + } + + #[benchmark(pov_mode = Measured, extra, skip_meta)] + fn bench2() { + #[block] + {} + } + + #[benchmark(extra, pov_mode = Measured { + Pallet: Measured, + Pallet::Storage: MaxEncodedLen, + }, skip_meta)] + fn bench3() { + #[block] + {} + } + + #[benchmark(skip_meta, extra, pov_mode = Measured { + Pallet::Storage: MaxEncodedLen, + Pallet::StorageSubKey: Measured, + })] + fn bench4() { + #[block] + {} + } + + #[benchmark(pov_mode = MaxEncodedLen { + Pallet::Storage: Measured, + Pallet::StorageSubKey: Measured + }, extra, skip_meta)] + fn bench5() { + #[block] + {} + } + + #[benchmark(pov_mode = MaxEncodedLen { + Pallet::Storage: Measured, + Pallet::Storage::Nested: Ignored + }, extra, skip_meta)] + fn bench6() { + #[block] + {} + } +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/benchmark_ui/pass/valid_basic.rs b/substrate/frame/support/test/tests/benchmark_ui/pass/valid_basic.rs index 09e09790b2f9..f432af44d9d4 100644 --- a/substrate/frame/support/test/tests/benchmark_ui/pass/valid_basic.rs +++ b/substrate/frame/support/test/tests/benchmark_ui/pass/valid_basic.rs @@ -24,7 +24,7 @@ use frame_support_test::Config; mod benches { use super::*; - #[benchmark(skip_meta, extra)] + #[benchmark(skip_meta, pov_mode = Measured, extra)] fn bench() { let a = 2 + 2; #[block] diff --git a/substrate/frame/support/test/tests/benchmark_ui/unrecognized_option.stderr b/substrate/frame/support/test/tests/benchmark_ui/unrecognized_option.stderr index bea770b634e2..2eb06e396a85 100644 --- a/substrate/frame/support/test/tests/benchmark_ui/unrecognized_option.stderr +++ b/substrate/frame/support/test/tests/benchmark_ui/unrecognized_option.stderr @@ -1,4 +1,4 @@ -error: expected `extra` or `skip_meta` +error: expected one of: `extra`, `skip_meta`, `pov_mode` --> tests/benchmark_ui/unrecognized_option.rs:26:32 | 26 | #[benchmark(skip_meta, extra, bad)] diff --git a/substrate/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs b/substrate/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs index 332e1e78730e..3f3148a03a9f 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs @@ -50,7 +50,7 @@ impl Animal for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} #[derive_impl(FourLeggedAnimal as Animal)] struct Something {} diff --git a/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs b/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs index bc118acdf1e6..7093c32400bd 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs @@ -50,14 +50,14 @@ impl Animal for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} // Should 
throw: `error: cannot find macro `__export_tokens_tt_tiger` in this scope` // // Note that there is really no better way to clean up this error, tt_call suffers from the // same downside but this is really the only rough edge when using macro magic. #[derive_impl(Tiger as Animal)] -impl Animal for AcquaticMammal { +impl Animal for AquaticMammal { type Locomotion = (Swims, RunsOnFourLegs); type Environment = (Land, Sea); } diff --git a/substrate/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs b/substrate/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs index 9535ac3deda6..8e0253e45c17 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs @@ -50,10 +50,10 @@ impl Animal for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} #[derive_impl(FourLeggedAnimal as Insect)] -impl Animal for AcquaticMammal { +impl Animal for AquaticMammal { type Locomotion = (Swims, RunsOnFourLegs); type Environment = (Land, Sea); } diff --git a/substrate/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs b/substrate/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs index f26c28313e51..4914ceea0b6d 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs @@ -50,10 +50,10 @@ impl Animal for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} #[derive_impl(FourLeggedAnimal as)] -impl Animal for AcquaticMammal { +impl Animal for AquaticMammal { type Locomotion = (Swims, RunsOnFourLegs); type Environment = (Land, Sea); } diff --git a/substrate/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs b/substrate/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs index 37c0742f195d..20744b8cba26 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs @@ -51,19 +51,19 @@ impl Animal for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} // without omitting the `as X` #[derive_impl(FourLeggedAnimal as Animal)] -impl Animal for AcquaticMammal { +impl Animal for AquaticMammal { type Locomotion = (Swims, RunsOnFourLegs); type Environment = (Land, Sea); } -assert_type_eq_all!(<AcquaticMammal as Animal>::Locomotion, (Swims, RunsOnFourLegs)); -assert_type_eq_all!(<AcquaticMammal as Animal>::Environment, (Land, Sea)); -assert_type_eq_all!(<AcquaticMammal as Animal>::Diet, Omnivore); -assert_type_eq_all!(<AcquaticMammal as Animal>::SleepingStrategy, Diurnal); +assert_type_eq_all!(<AquaticMammal as Animal>::Locomotion, (Swims, RunsOnFourLegs)); +assert_type_eq_all!(<AquaticMammal as Animal>::Environment, (Land, Sea)); +assert_type_eq_all!(<AquaticMammal as Animal>::Diet, Omnivore); +assert_type_eq_all!(<AquaticMammal as Animal>::SleepingStrategy, Diurnal); pub struct Lion {} diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs index a99bc99c71a7..f78b4c312de5 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs @@ -84,7 +84,7 @@ fn module_error_outer_enum_expand_explicit() { // Check that all error types are propagated match RuntimeError::Example(pallet::Error::InsufficientProposersBalance) { - // Error passed implicitely to the pallet system. + // Error passed implicitly to the pallet system.
RuntimeError::System(system) => match system { frame_system::Error::InvalidSpecName => (), frame_system::Error::SpecVersionNeedsToIncrease => (), diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs index 062128a14267..55403d7d2f5d 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs @@ -64,11 +64,11 @@ frame_support::construct_runtime!( // Exclude part `Storage` in order not to check its metadata in tests. System: frame_system exclude_parts { Storage }, - // Pallet exposes `Error` implicitely. + // Pallet exposes `Error` implicitly. Example: common::outer_enums::pallet, Instance1Example: common::outer_enums::pallet::<Instance1>, - // Pallet exposes `Error` implicitely. + // Pallet exposes `Error` implicitly. Example2: common::outer_enums::pallet2, Instance1Example2: common::outer_enums::pallet2::<Instance1>, @@ -84,7 +84,7 @@ fn module_error_outer_enum_expand_implicit() { // Check that all error types are propagated match RuntimeError::Example(pallet::Error::InsufficientProposersBalance) { - // Error passed implicitely to the pallet system. + // Error passed implicitly to the pallet system. RuntimeError::System(system) => match system { frame_system::Error::InvalidSpecName => (), frame_system::Error::SpecVersionNeedsToIncrease => (), diff --git a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs index 573ceb6dfab7..b510beb54dda 100644 --- a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs +++ b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs @@ -17,13 +17,13 @@ #[frame_support::pallet] mod pallet { - use frame::deps::frame_system::pallet_prelude::BlockNumberFor; + use polkadot_sdk_frame::deps::frame_system::pallet_prelude::BlockNumberFor; use frame_support::pallet_prelude::{Hooks, IsType}; #[pallet::config] - pub trait Config: frame::deps::frame_system::Config { + pub trait Config: polkadot_sdk_frame::deps::frame_system::Config { type Bar: Clone + std::fmt::Debug + Eq; - type RuntimeEvent: IsType<<Self as frame::deps::frame_system::Config>::RuntimeEvent> + type RuntimeEvent: IsType<<Self as polkadot_sdk_frame::deps::frame_system::Config>::RuntimeEvent> + From<Event<Self>>; } diff --git a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr index 0f805c972e4d..384e44d97a61 100644 --- a/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/event_type_invalid_bound_no_frame_crate.stderr @@ -1,5 +1,5 @@ error: Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must bound: `IsType<<Self as frame_system::Config>::RuntimeEvent>` --> tests/pallet_ui/event_type_invalid_bound_no_frame_crate.rs:26:3 | -26 | type RuntimeEvent: IsType<<Self as frame::deps::frame_system::Config>::RuntimeEvent> +26 | type RuntimeEvent: IsType<<Self as polkadot_sdk_frame::deps::frame_system::Config>::RuntimeEvent> | ^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs b/substrate/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs index f40d1040858f..ddccd0b3e192 100644 --- a/substrate/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs +++ b/substrate/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs @@ -18,7 +18,7 @@ use frame_support::pallet_prelude::*; use
frame_system::pallet_prelude::*; -// If, for whatever reason, you dont to not use a `WeightInfo` trait - it will still work. +// If, for whatever reason, you don't want to use a `WeightInfo` trait - it will still work. struct Impl; impl Impl { diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 4229d1e8a545..d269e6d2726d 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -13,8 +13,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box<T> - Rc<T> frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Rc<T> Arc<T> = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -65,8 +65,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - Rc<T> frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Rc<T> Arc<T> Vec<T> and $N others @@ -106,8 +106,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box<T> - Rc<T> frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Rc<T> Arc<T> = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -148,8 +148,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - Rc<T> frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Rc<T> Arc<T> Vec<T> and $N others @@ -168,8 +168,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box<T> - Rc<T> frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Rc<T> Arc<T> = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -210,8 +210,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - Rc<T> frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Rc<T> Arc<T> Vec<T> and $N others diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 855d289d0a16..13d761d65d20 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -13,8 +13,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box<T> - Rc<T> frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Rc<T> Arc<T> = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -65,8 +65,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - Rc<T> frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Rc<T> Arc<T> Vec<T> and $N others @@
-106,8 +106,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box<T> - Rc<T> frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Rc<T> Arc<T> = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -148,8 +148,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - Rc<T> frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Rc<T> Arc<T> Vec<T> and $N others @@ -168,8 +168,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box<T> - Rc<T> frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Rc<T> Arc<T> = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -210,8 +210,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - Rc<T> frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes + Rc<T> Arc<T> Vec<T> and $N others diff --git a/substrate/frame/support/test/tests/versioned_migration.rs b/substrate/frame/support/test/tests/versioned_migration.rs index 95f9a476cc1e..d5dc0a8f6fe3 100644 --- a/substrate/frame/support/test/tests/versioned_migration.rs +++ b/substrate/frame/support/test/tests/versioned_migration.rs @@ -25,7 +25,7 @@ use frame_support::{ construct_runtime, derive_impl, migrations::VersionedMigration, parameter_types, - traits::{GetStorageVersion, OnRuntimeUpgrade, StorageVersion}, + traits::{GetStorageVersion, OnRuntimeUpgrade, StorageVersion, UncheckedOnRuntimeUpgrade}, weights::constants::RocksDbWeight, }; use frame_system::Config; @@ -105,9 +105,11 @@ parameter_types! { static PostUpgradeCalledWith: Vec<u8> = Vec::new(); } -/// Implement `OnRuntimeUpgrade` for `SomeUnversionedMigration`. +/// Implement `UncheckedOnRuntimeUpgrade` for `SomeUnversionedMigration`. /// It sets SomeStorage to S, and returns a weight derived from UpgradeReads and UpgradeWrites.
-impl OnRuntimeUpgrade for SomeUnversionedMigration { +impl UncheckedOnRuntimeUpgrade + for SomeUnversionedMigration +{ fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { PreUpgradeCalled::set(true); Ok(PreUpgradeReturnBytes::get().to_vec()) diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index 2a6b68f4bd70..bf8275191c23 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] cfg-if = "1.0" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } serde = { features = ["alloc", "derive"], workspace = true } frame-support = { path = "../support", default-features = false } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } @@ -27,7 +27,7 @@ sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } sp-version = { path = "../../primitives/version", default-features = false, features = ["serde"] } sp-weights = { path = "../../primitives/weights", default-features = false, features = ["serde"] } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] criterion = "0.4.0" diff --git a/substrate/frame/system/benchmarking/Cargo.toml b/substrate/frame/system/benchmarking/Cargo.toml index 363577bfe9f3..cedf00d783b8 100644 --- a/substrate/frame/system/benchmarking/Cargo.toml +++ b/substrate/frame/system/benchmarking/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../benchmarking", default-features = false } frame-support = { path = "../../support", default-features = false } frame-system = { path = "..", default-features = false } diff --git a/substrate/frame/system/src/limits.rs b/substrate/frame/system/src/limits.rs index 5fd7a5af8757..ab5a98a6b974 100644 --- a/substrate/frame/system/src/limits.rs +++ b/substrate/frame/system/src/limits.rs @@ -378,7 +378,7 @@ impl BlockWeightsBuilder { /// class, based on the allowance. /// /// This is to make sure that extrinsics don't stay forever in the pool, - /// because they could seamingly fit the block (since they are below `max_block`), + /// because they could seemingly fit the block (since they are below `max_block`), /// but the cost of calling `on_initialize` always prevents them from being included. pub fn avg_block_initialization(mut self, init_cost: Perbill) -> Self { self.init_cost = Some(init_cost); diff --git a/substrate/frame/system/src/offchain.rs b/substrate/frame/system/src/offchain.rs index 3be518e9af2f..e62912268ceb 100644 --- a/substrate/frame/system/src/offchain.rs +++ b/substrate/frame/system/src/offchain.rs @@ -22,7 +22,7 @@ //! This module provides transaction related helpers to: //! - Submit a raw unsigned transaction //! - Submit an unsigned transaction with a signed payload -//! - Submit a signed transction. +//! - Submit a signed transaction. //! //! ## Usage //! 
@@ -386,7 +386,7 @@ where /// /// // runtime-specific public key /// type Public = MultiSigner: From; -/// type Signature = MulitSignature: From; +/// type Signature = MultiSignature: From; /// ``` // TODO [#5662] Potentially use `IsWrappedBy` types, or find some other way to make it easy to // obtain unwrapped crypto (and wrap it back). @@ -446,7 +446,7 @@ pub trait SigningTypes: crate::Config { /// A public key that is capable of identifying `AccountId`s. /// /// Usually that's either a raw crypto public key (e.g. `sr25519::Public`) or - /// an aggregate type for multiple crypto public keys, like `MulitSigner`. + /// an aggregate type for multiple crypto public keys, like `MultiSigner`. type Public: Clone + PartialEq + IdentifyAccount diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index 8bd659b14460..b69c3cc3749f 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } @@ -29,7 +29,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-storage = { path = "../../primitives/storage", default-features = false } sp-timestamp = { path = "../../primitives/timestamp", default-features = false } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] sp-core = { path = "../../primitives/core" } diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index 457a9bfbf63b..e768205a5ca4 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/tips/src/migrations/mod.rs b/substrate/frame/tips/src/migrations/mod.rs index 9cdd01c17fbf..a7917bfce16f 100644 --- a/substrate/frame/tips/src/migrations/mod.rs +++ b/substrate/frame/tips/src/migrations/mod.rs @@ -17,7 +17,7 @@ /// Version 4. /// -/// For backward compatability reasons, pallet-tips uses `Treasury` for storage module prefix +/// For backward compatibility reasons, pallet-tips uses `Treasury` for storage module prefix /// before calling this migration. After calling this migration, it will get replaced with /// own storage identifier. 
pub mod v4; diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 3383a271bc98..be75700f6627 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml index ff533a947696..52cb0e0114f4 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -23,7 +23,7 @@ frame-system = { path = "../../system", default-features = false } pallet-asset-conversion = { path = "../../asset-conversion", default-features = false } pallet-transaction-payment = { path = "..", default-features = false } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } [dev-dependencies] sp-core = { path = "../../../primitives/core", default-features = false } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index d3fc04438bd6..724f98f8aa03 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -28,7 +28,7 @@ frame-benchmarking = { path = "../../benchmarking", default-features = false, op # Other dependencies codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } [dev-dependencies] diff --git a/substrate/frame/transaction-payment/src/tests.rs b/substrate/frame/transaction-payment/src/tests.rs index d3a1721ccb99..bc0efd2d64a3 100644 --- a/substrate/frame/transaction-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/src/tests.rs @@ -384,7 +384,7 @@ fn query_call_info_and_fee_details_works() { adjusted_weight_fee: info .weight .min(BlockWeights::get().max_block) - .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multipler */ + .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multiplier */ }), tip: 0, }, diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index cc4610bab57c..2118937d463e 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "6.1", optional = true } codec 
= { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/transaction-storage/README.md b/substrate/frame/transaction-storage/README.md index 1066968469d4..b173c0a84d5a 100644 --- a/substrate/frame/transaction-storage/README.md +++ b/substrate/frame/transaction-storage/README.md @@ -79,7 +79,7 @@ ipfs block get /ipfs/ > kitten.jpeg ``` To renew data and prevent it from being disposed after the storage period, use `transactionStorage.renew(block, index)` -where `block` is the block number of the previous store or renew transction, and index is the index of that transaction +where `block` is the block number of the previous store or renew transaction, and index is the index of that transaction in the block. diff --git a/substrate/frame/transaction-storage/src/lib.rs b/substrate/frame/transaction-storage/src/lib.rs index c203ed6d0a82..ae61f5c157fa 100644 --- a/substrate/frame/transaction-storage/src/lib.rs +++ b/substrate/frame/transaction-storage/src/lib.rs @@ -139,7 +139,7 @@ pub mod pallet { InvalidProof, /// Missing storage proof. MissingProof, - /// Unable to verify proof becasue state data is missing. + /// Unable to verify proof because state data is missing. MissingStateData, /// Double proof check in the block. DoubleCheck, diff --git a/substrate/frame/transaction-storage/src/tests.rs b/substrate/frame/transaction-storage/src/tests.rs index e17b3ca3bebd..621f74804ecc 100644 --- a/substrate/frame/transaction-storage/src/tests.rs +++ b/substrate/frame/transaction-storage/src/tests.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Tests for transction-storage pallet. +//! Tests for transaction-storage pallet. 
use super::{Pallet as TransactionStorage, *}; use crate::mock::*; @@ -53,8 +53,8 @@ fn discards_data() { }; run_to_block(11, proof_provider); assert!(Transactions::<Test>::get(1).is_some()); - let transctions = Transactions::<Test>::get(1).unwrap(); - assert_eq!(transctions.len(), 2); + let transactions = Transactions::<Test>::get(1).unwrap(); + assert_eq!(transactions.len(), 2); assert_eq!(ChunkCount::<Test>::get(1), 16); run_to_block(12, proof_provider); assert!(Transactions::<Test>::get(1).is_none()); diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index 416d321cf355..39df3bb47921 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -20,9 +20,9 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", "max-encoded-len", ] } -docify = "0.2.7" +docify = "0.2.8" impl-trait-for-tuples = "0.2.2" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs index 647d33a4bc86..acb0fa2ca23b 100644 --- a/substrate/frame/treasury/src/lib.rs +++ b/substrate/frame/treasury/src/lib.rs @@ -623,7 +623,7 @@ pub mod pallet { with_context::<SpendContext<BalanceOf<T, I>>, _>(|v| { let context = v.or_default(); - // We group based on `max_amount`, to dinstinguish between different kind of + // We group based on `max_amount`, to distinguish between different kind of // origins. (assumes that all origins have different `max_amount`) // // Worst case is that we reject some "valid" request. @@ -1044,7 +1044,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> { /// ### Invariants of proposal storage items /// /// 1. [`ProposalCount`] >= Number of elements in [`Proposals`]. - /// 2. Each entry in [`Proposals`] should be saved under a key stricly less than current + /// 2. Each entry in [`Proposals`] should be saved under a key strictly less than current /// [`ProposalCount`]. /// 3. Each [`ProposalIndex`] contained in [`Approvals`] should exist in [`Proposals`]. /// Note, that this automatically implies [`Approvals`].count() <= [`Proposals`].count(). @@ -1080,7 +1080,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> { /// ## Invariants of spend storage items /// /// 1. [`SpendCount`] >= Number of elements in [`Spends`]. - /// 2. Each entry in [`Spends`] should be saved under a key stricly less than current + /// 2. Each entry in [`Spends`] should be saved under a key strictly less than current /// [`SpendCount`]. /// 3. For each spend entry contained in [`Spends`] we should have spend.expire_at /// > spend.valid_from.
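To make the proposal invariants listed above concrete, here is a minimal sketch of how the first two could be checked in a `try_state`-style hook. This is illustrative only, not the pallet's actual code; it assumes the treasury's `ProposalCount` and `Proposals` storage items as named in the doc comments above.

```rust
use frame_support::ensure;

// Check invariants 1 and 2: the stored count bounds the number of proposals,
// and every proposal key is strictly below the current count.
fn check_proposal_invariants<T: Config<I>, I: 'static>() -> Result<(), sp_runtime::TryRuntimeError> {
	let count = ProposalCount::<T, I>::get();
	ensure!(
		Proposals::<T, I>::iter_keys().count() as u32 <= count,
		"ProposalCount must be >= the number of proposals"
	);
	ensure!(
		Proposals::<T, I>::iter_keys().all(|key| key < count),
		"every proposal must be stored under a key strictly less than ProposalCount"
	);
	Ok(())
}
```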
diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index 91d03c02f3e8..469c05e98f32 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -16,11 +16,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -docify = "0.2.7" +docify = "0.2.8" frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-runtime = { path = "../../primitives/runtime", default-features = false } pallet-balances = { path = "../balances", default-features = false, optional = true } pallet-utility = { path = "../utility", default-features = false, optional = true } diff --git a/substrate/frame/uniques/Cargo.toml b/substrate/frame/uniques/Cargo.toml index 1ccd43711e9b..c28f22bd779f 100644 --- a/substrate/frame/uniques/Cargo.toml +++ b/substrate/frame/uniques/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/uniques/src/migration.rs b/substrate/frame/uniques/src/migration.rs index ba0855a6bb65..90d44e7790d6 100644 --- a/substrate/frame/uniques/src/migration.rs +++ b/substrate/frame/uniques/src/migration.rs @@ -18,15 +18,15 @@ //! Various pieces of common functionality. use super::*; use core::marker::PhantomData; -use frame_support::traits::{Get, OnRuntimeUpgrade}; +use frame_support::traits::{Get, UncheckedOnRuntimeUpgrade}; mod v1 { use super::*; /// Actual implementation of the storage migration. - pub struct MigrateToV1Impl<T, I>(PhantomData<(T, I)>); + pub struct UncheckedMigrateToV1Impl<T, I>(PhantomData<(T, I)>); - impl<T: Config<I>, I: 'static> OnRuntimeUpgrade for MigrateToV1Impl<T, I> { + impl<T: Config<I>, I: 'static> UncheckedOnRuntimeUpgrade for UncheckedMigrateToV1Impl<T, I> { fn on_runtime_upgrade() -> frame_support::weights::Weight { let mut count = 0; for (collection, detail) in Collection::<T, I>::iter() { @@ -49,7 +49,7 @@ mod v1 { pub type MigrateV0ToV1<T, I> = frame_support::migrations::VersionedMigration< 0, 1, - v1::MigrateToV1Impl<T, I>, + v1::UncheckedMigrateToV1Impl<T, I>, Pallet<T, I>, <T as frame_system::Config>::DbWeight, >; diff --git a/substrate/frame/uniques/src/tests.rs b/substrate/frame/uniques/src/tests.rs index 5f39b43ac943..4f9632517c90 100644 --- a/substrate/frame/uniques/src/tests.rs +++ b/substrate/frame/uniques/src/tests.rs @@ -288,7 +288,7 @@ fn transfer_owner_should_work() { assert_eq!(Balances::reserved_balance(&2), 0); assert_eq!(Balances::reserved_balance(&3), 45); - // 2's acceptence from before is reset when it became owner, so it cannot be transfered + // 2's acceptance from before is reset when it became owner, so it cannot be transferred // without a fresh acceptance.
assert_noop!( Uniques::transfer_ownership(RuntimeOrigin::signed(3), 0, 2), @@ -691,7 +691,7 @@ fn approved_account_gets_reset_after_transfer() { assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3)); assert_ok!(Uniques::transfer(RuntimeOrigin::signed(2), 0, 42, 5)); - // this shouldn't work because we have just transfered the item to another account. + // this shouldn't work because we have just transferred the item to another account. assert_noop!( Uniques::transfer(RuntimeOrigin::signed(3), 0, 42, 4), Error::::NoPermission diff --git a/substrate/frame/utility/Cargo.toml b/substrate/frame/utility/Cargo.toml index 8decb600f694..4e41f913252d 100644 --- a/substrate/frame/utility/Cargo.toml +++ b/substrate/frame/utility/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/utility/README.md b/substrate/frame/utility/README.md index 00fff76cd626..0a6769ae1c7c 100644 --- a/substrate/frame/utility/README.md +++ b/substrate/frame/utility/README.md @@ -17,7 +17,7 @@ This module contains two basic pieces of functionality: need multiple distinct accounts (e.g. as controllers for many staking accounts), but where it's perfectly fine to have each of them controlled by the same underlying keypair. Derivative accounts are, for the purposes of proxy filtering considered exactly the same as - the oigin and are thus hampered with the origin's filters. + the origin and are thus hampered with the origin's filters. Since proxy filters are respected in all dispatches of this module, it should never need to be filtered by any proxy. diff --git a/substrate/frame/vesting/Cargo.toml b/substrate/frame/vesting/Cargo.toml index d0018b92c221..a8dbb64c304e 100644 --- a/substrate/frame/vesting/Cargo.toml +++ b/substrate/frame/vesting/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/vesting/src/migrations.rs b/substrate/frame/vesting/src/migrations.rs index 76af8918a010..c2142a7e39e7 100644 --- a/substrate/frame/vesting/src/migrations.rs +++ b/substrate/frame/vesting/src/migrations.rs @@ -30,7 +30,7 @@ pub mod v1 { log::debug!( target: "runtime::vesting", - "migration: Vesting storage version v1 PRE migration checks succesful!" + "migration: Vesting storage version v1 PRE migration checks successful!" 
); Ok(()) diff --git a/substrate/frame/whitelist/Cargo.toml b/substrate/frame/whitelist/Cargo.toml index 564393bb8c20..09c4a4b1627d 100644 --- a/substrate/frame/whitelist/Cargo.toml +++ b/substrate/frame/whitelist/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/whitelist/src/benchmarking.rs b/substrate/frame/whitelist/src/benchmarking.rs index 50fd42f9574e..cbe6ee4becd0 100644 --- a/substrate/frame/whitelist/src/benchmarking.rs +++ b/substrate/frame/whitelist/src/benchmarking.rs @@ -20,58 +20,57 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::v1::{benchmarks, BenchmarkError}; -use frame_support::{ensure, traits::EnsureOrigin}; +use frame_benchmarking::v2::*; +use frame_support::traits::EnsureOrigin; #[cfg(test)] use crate::Pallet as Whitelist; -benchmarks! { - whitelist_call { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn whitelist_call() -> Result<(), BenchmarkError> { let origin = T::WhitelistOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; let call_hash = Default::default(); - }: _(origin, call_hash) - verify { - ensure!( - WhitelistedCall::::contains_key(call_hash), - "call not whitelisted" - ); - ensure!( - T::Preimages::is_requested(&call_hash), - "preimage not requested" - ); + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, call_hash); + + ensure!(WhitelistedCall::::contains_key(call_hash), "call not whitelisted"); + ensure!(T::Preimages::is_requested(&call_hash), "preimage not requested"); + Ok(()) } - remove_whitelisted_call { + #[benchmark] + fn remove_whitelisted_call() -> Result<(), BenchmarkError> { let origin = T::WhitelistOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; let call_hash = Default::default(); Pallet::::whitelist_call(origin.clone(), call_hash) .expect("whitelisting call must be successful"); - }: _(origin, call_hash) - verify { - ensure!( - !WhitelistedCall::::contains_key(call_hash), - "whitelist not removed" - ); - ensure!( - !T::Preimages::is_requested(&call_hash), - "preimage still requested" - ); + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, call_hash); + + ensure!(!WhitelistedCall::::contains_key(call_hash), "whitelist not removed"); + ensure!(!T::Preimages::is_requested(&call_hash), "preimage still requested"); + Ok(()) } // We benchmark with the maximum possible size for a call. // If the resulting weight is too big, maybe it worth having a weight which depends // on the size of the call, with a new witness in parameter. - #[pov_mode = MaxEncodedLen { + #[benchmark(pov_mode = MaxEncodedLen { // Use measured PoV size for the Preimages since we pass in a length witness. Preimage::PreimageFor: Measured - }] - dispatch_whitelisted_call { - // NOTE: we remove `10` because we need some bytes to encode the variants and vec length - let n in 1 .. 
T::Preimages::MAX_LENGTH as u32 - 10; - + })] + // NOTE: we remove `10` because we need some bytes to encode the variants and vec length + fn dispatch_whitelisted_call( + n: Linear<1, { T::Preimages::MAX_LENGTH as u32 - 10 }>, + ) -> Result<(), BenchmarkError> { let origin = T::DispatchWhitelistedOrigin::try_successful_origin() .map_err(|_| BenchmarkError::Weightless)?; let remark = alloc::vec![1u8; n as usize]; @@ -86,21 +85,16 @@ benchmarks! { T::Preimages::note(encoded_call.into()).unwrap(); - }: _(origin, call_hash, call_encoded_len, call_weight) - verify { - ensure!( - !WhitelistedCall::::contains_key(call_hash), - "whitelist not removed" - ); - ensure!( - !T::Preimages::is_requested(&call_hash), - "preimage still requested" - ); - } + #[extrinsic_call] + _(origin as T::RuntimeOrigin, call_hash, call_encoded_len, call_weight); - dispatch_whitelisted_call_with_preimage { - let n in 1 .. 10_000; + ensure!(!WhitelistedCall::::contains_key(call_hash), "whitelist not removed"); + ensure!(!T::Preimages::is_requested(&call_hash), "preimage still requested"); + Ok(()) + } + #[benchmark] + fn dispatch_whitelisted_call_with_preimage(n: Linear<1, 10_000>) -> Result<(), BenchmarkError> { let origin = T::DispatchWhitelistedOrigin::try_successful_origin() .map_err(|_| BenchmarkError::Weightless)?; let remark = alloc::vec![1u8; n as usize]; @@ -110,16 +104,13 @@ benchmarks! { Pallet::::whitelist_call(origin.clone(), call_hash) .expect("whitelisting call must be successful"); - }: _(origin, Box::new(call)) - verify { - ensure!( - !WhitelistedCall::::contains_key(call_hash), - "whitelist not removed" - ); - ensure!( - !T::Preimages::is_requested(&call_hash), - "preimage still requested" - ); + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, Box::new(call)); + + ensure!(!WhitelistedCall::::contains_key(call_hash), "whitelist not removed"); + ensure!(!T::Preimages::is_requested(&call_hash), "preimage still requested"); + Ok(()) } impl_benchmark_test_suite!(Whitelist, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index fc7c8c7a16c9..6fd7570d54f2 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -27,7 +27,7 @@ sp-state-machine = { path = "../state-machine", default-features = false, option sp-trie = { path = "../trie", default-features = false, optional = true } hash-db = { version = "0.16.0", optional = true } thiserror = { optional = true, workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } sp-metadata-ir = { path = "../metadata-ir", default-features = false, optional = true } diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index b7e5600a017a..87a381fd7bf9 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -797,7 +797,7 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { } Ok(quote!( - const RUNTIME_API_VERSIONS: #c::ApisVec = #c::create_apis_vec!([ #( #result ),* ]); + pub const RUNTIME_API_VERSIONS: #c::ApisVec = #c::create_apis_vec!([ #( #result ),* ]); #( #sections )* )) diff --git a/substrate/primitives/api/proc-macro/src/utils.rs b/substrate/primitives/api/proc-macro/src/utils.rs index c8c1f12d90a1..a6570a98f1f7 100644 --- 
a/substrate/primitives/api/proc-macro/src/utils.rs +++ b/substrate/primitives/api/proc-macro/src/utils.rs @@ -34,7 +34,9 @@ pub fn generate_crate_access() -> TokenStream { quote!(#renamed_name::__private) }, Err(e) => - if let Ok(FoundCrate::Name(name)) = crate_name(&"frame") { + if let Ok(FoundCrate::Name(name)) = + crate_name(&"polkadot-sdk-frame").or_else(|_| crate_name(&"frame")) + { let path = format!("{}::deps::sp_api::__private", name); let path = syn::parse_str::(&path).expect("is a valid path; qed"); quote!( #path ) diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index 3a90553bbf00..52a4bd7bda30 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -26,11 +26,11 @@ codec = { package = "parity-scale-codec", version = "3.6.1" } sp-state-machine = { path = "../../state-machine" } trybuild = "1.0.88" rustversion = "1.0.6" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } [dev-dependencies] criterion = "0.4.0" -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } sp-core = { path = "../../core" } static_assertions = "1.1.0" diff --git a/substrate/primitives/application-crypto/Cargo.toml b/substrate/primitives/application-crypto/Cargo.toml index 146925100841..41930945f056 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-core = { path = "../core", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } sp-io = { path = "../io", default-features = false } diff --git a/substrate/primitives/application-crypto/src/lib.rs b/substrate/primitives/application-crypto/src/lib.rs index 7e7a4ee821a3..37949d7c41de 100644 --- a/substrate/primitives/application-crypto/src/lib.rs +++ b/substrate/primitives/application-crypto/src/lib.rs @@ -28,7 +28,7 @@ pub use sp_core::crypto::{DeriveError, Pair, SecretStringError}; #[doc(hidden)] pub use sp_core::{ self, - crypto::{ByteArray, CryptoType, Derive, IsWrappedBy, Public, UncheckedFrom, Wraps}, + crypto::{ByteArray, CryptoType, Derive, IsWrappedBy, Public, Signature, UncheckedFrom, Wraps}, RuntimeDebug, }; @@ -509,6 +509,12 @@ macro_rules! app_crypto_signature_common { } } + impl AsMut<[u8]> for Signature { + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } + } + impl $crate::AppSignature for Signature { type Generic = $sig; } @@ -529,6 +535,12 @@ macro_rules! app_crypto_signature_common { } } + impl $crate::Signature for Signature {} + + impl $crate::ByteArray for Signature { + const LEN: usize = <$sig>::LEN; + } + impl Signature { /// Convert into wrapped generic signature type. 
pub fn into_inner(self) -> $sig { diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index 29c406b10b7f..16eae43c73fa 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -23,9 +23,11 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } integer-sqrt = "0.1.2" num-traits = { version = "0.2.17", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } static_assertions = "1.1.0" +sp-std = { path = "../std", default-features = false } +docify = "0.2.8" [dev-dependencies] criterion = "0.4.0" @@ -41,6 +43,7 @@ std = [ "scale-info/std", "serde/std", "sp-crypto-hashing/std", + "sp-std/std", ] # Serde support without relying on std features. serde = ["dep:serde", "scale-info/serde"] diff --git a/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs b/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs index e76dd1503e39..e2d31065635e 100644 --- a/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs +++ b/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs @@ -66,7 +66,7 @@ fn main() { let c = FixedI64::saturating_from_integer(x.saturating_add(y)); assert_eq!(a.saturating_add(b), c); - // Check substraction. + // Check subtraction. let a = FixedI64::saturating_from_integer(x); let b = FixedI64::saturating_from_integer(y); let c = FixedI64::saturating_from_integer(x.saturating_sub(y)); diff --git a/substrate/primitives/arithmetic/src/fixed_point.rs b/substrate/primitives/arithmetic/src/fixed_point.rs index 46c09df21868..c4e9259c5fc9 100644 --- a/substrate/primitives/arithmetic/src/fixed_point.rs +++ b/substrate/primitives/arithmetic/src/fixed_point.rs @@ -16,6 +16,33 @@ // limitations under the License. //! Decimal Fixed Point implementations for Substrate runtime. +//! Similar to types that implement [`PerThing`](crate::per_things), these are also +//! fixed-point types, however, they are able to represent larger fractions: +#![doc = docify::embed!("./src/lib.rs", fixed_u64)] +//! +//! ### Fixed Point Types in Practice +//! +//! If one needs to exceed the value of one (1), then +//! [`FixedU64`](FixedU64) (and its signed and `u128` counterparts) can be utilized. +//! Take for example this very rudimentary pricing mechanism, where we wish to calculate the demand +//! / supply to get a price for some on-chain compute: +#![doc = docify::embed!( + "./src/lib.rs", + fixed_u64_block_computation_example +)] +//! +//! For a much more comprehensive example, be sure to look at the source for broker (the "coretime") +//! pallet. +//! +//! #### Fixed Point Types in Practice +//! +//! Just as with [`PerThing`](PerThing), you can also perform regular mathematical +//! expressions: +#![doc = docify::embed!( + "./src/lib.rs", + fixed_u64_operation_example +)] +//! use crate::{ helpers_128bit::{multiply_by_rational_with_rounding, sqrt}, @@ -541,7 +568,7 @@ macro_rules! implement_fixed { let v = self.0 as u128; // Want x' = sqrt(x) where x = n/D and x' = n'/D (D is fixed) - // Our prefered way is: + // Our preferred way is: // sqrt(n/D) = sqrt(nD / D^2) = sqrt(nD)/sqrt(D^2) = sqrt(nD)/D // ergo n' = sqrt(nD) // but this requires nD to fit into our type. 
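The `sqrt` comment above compresses a short derivation. Spelled out, with inner integer value n and fixed divisor D (so the fixed-point number is x = n/D):

```latex
% Restating the rescaling argument from the comment above.
% Want x' = \sqrt{x} with x = n/D and x' = n'/D (D fixed):
x' = \sqrt{\frac{n}{D}}
   = \sqrt{\frac{nD}{D^{2}}}
   = \frac{\sqrt{nD}}{D}
\quad\Longrightarrow\quad
n' = \sqrt{nD}
```

This is why the code widens to `u128` first: the product nD must fit in the working integer type.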
diff --git a/substrate/primitives/arithmetic/src/lib.rs b/substrate/primitives/arithmetic/src/lib.rs index 358a841fd0d0..4b0728b90381 100644 --- a/substrate/primitives/arithmetic/src/lib.rs +++ b/substrate/primitives/arithmetic/src/lib.rs @@ -102,7 +102,7 @@ where fn tcmp(&self, other: &T, threshold: T) -> Ordering { // early exit. if threshold.is_zero() { - return self.cmp(other) + return self.cmp(other); } let upper_bound = other.saturating_add(threshold); @@ -207,12 +207,12 @@ where // Nothing to do here. if count.is_zero() { - return Ok(Vec::::new()) + return Ok(Vec::::new()); } let diff = targeted_sum.max(sum) - targeted_sum.min(sum); if diff.is_zero() { - return Ok(input.to_vec()) + return Ok(input.to_vec()); } let needs_bump = targeted_sum > sum; @@ -255,7 +255,7 @@ where min_index += 1; min_index %= count; } - leftover -= One::one() + leftover -= One::one(); } } else { // must decrease the stakes a bit. decrement from the max element. index of maximum is now @@ -289,7 +289,7 @@ where if output_with_idx[max_index].1 <= threshold { max_index = max_index.checked_sub(1).unwrap_or(count - 1); } - leftover -= One::one() + leftover -= One::one(); } else { max_index = max_index.checked_sub(1).unwrap_or(count - 1); } @@ -301,7 +301,7 @@ where targeted_sum, "sum({:?}) != {:?}", output_with_idx, - targeted_sum, + targeted_sum ); // sort again based on the original index. @@ -357,7 +357,7 @@ mod normalize_tests { vec![ Perbill::from_parts(333333334), Perbill::from_parts(333333333), - Perbill::from_parts(333333333), + Perbill::from_parts(333333333) ] ); @@ -368,7 +368,7 @@ mod normalize_tests { vec![ Perbill::from_parts(316666668), Perbill::from_parts(383333332), - Perbill::from_parts(300000000), + Perbill::from_parts(300000000) ] ); } @@ -379,13 +379,13 @@ mod normalize_tests { // could have a situation where the sum cannot be calculated in the inner type. Calculating // using the upper type of the per_thing should assure this to be okay. assert_eq!( - vec![PerU16::from_percent(40), PerU16::from_percent(40), PerU16::from_percent(40),] + vec![PerU16::from_percent(40), PerU16::from_percent(40), PerU16::from_percent(40)] .normalize(PerU16::one()) .unwrap(), vec![ PerU16::from_parts(21845), // 33% PerU16::from_parts(21845), // 33% - PerU16::from_parts(21845), // 33% + PerU16::from_parts(21845) // 33% ] ); } @@ -429,6 +429,88 @@ mod normalize_tests { } } +#[cfg(test)] +mod per_and_fixed_examples { + use super::*; + + #[docify::export] + #[test] + fn percent_mult() { + let percent = Percent::from_rational(5u32, 100u32); // aka, 5% + let five_percent_of_100 = percent * 100u32; // 5% of 100 is 5. + assert_eq!(five_percent_of_100, 5) + } + #[docify::export] + #[test] + fn perbill_example() { + let p = Perbill::from_percent(80); + // 800000000 bil, or a representative of 0.800000000. + // Precision is in the billions place. + assert_eq!(p.deconstruct(), 800000000); + } + + #[docify::export] + #[test] + fn percent_example() { + let percent = Percent::from_rational(190u32, 400u32); + assert_eq!(percent.deconstruct(), 47); + } + + #[docify::export] + #[test] + fn fixed_u64_block_computation_example() { + // Calculate a very rudimentary on-chain price from supply / demand + // Supply: Cores available per block + // Demand: Cores being ordered per block + let price = FixedU64::from_rational(5u128, 10u128); + + // 0.5 DOT per core + assert_eq!(price, FixedU64::from_float(0.5)); + + // Now, the story has changed - lots of demand means we buy as many cores as there + // available. 
This also means that price goes up! For the sake of simplicity, we don't care + // about who gets a core - just about our very simple price model + + // Calculate a very rudimentary on-chain price from supply / demand + // Supply: Cores available per block + // Demand: Cores being ordered per block + let price = FixedU64::from_rational(19u128, 10u128); + + // 1.9 DOT per core + assert_eq!(price, FixedU64::from_float(1.9)); + } + + #[docify::export] + #[test] + fn fixed_u64() { + // The difference between this and perthings is perthings operates within the realm of [0, + // 1]. In cases where we need > 1, we can use fixed types such as FixedU64 + + let rational_1 = FixedU64::from_rational(10, 5); // "200%" aka 2. + let rational_2 = FixedU64::from_rational_with_rounding(5, 10, Rounding::Down); // "50%" aka 0.50... + + assert_eq!(rational_1, (2u64).into()); + assert_eq!(rational_2.into_perbill(), Perbill::from_float(0.5)); + } + + #[docify::export] + #[test] + fn fixed_u64_operation_example() { + let rational_1 = FixedU64::from_rational(10, 5); // "200%" aka 2. + let rational_2 = FixedU64::from_rational(8, 5); // "160%" aka 1.6. + + let addition = rational_1 + rational_2; + let multiplication = rational_1 * rational_2; + let division = rational_1 / rational_2; + let subtraction = rational_1 - rational_2; + + assert_eq!(addition, FixedU64::from_float(3.6)); + assert_eq!(multiplication, FixedU64::from_float(3.2)); + assert_eq!(division, FixedU64::from_float(1.25)); + assert_eq!(subtraction, FixedU64::from_float(0.4)); + } +} + #[cfg(test)] mod threshold_compare_tests { use super::*; @@ -441,15 +523,15 @@ mod threshold_compare_tests { let e = Perbill::from_percent(10).mul_ceil(b); // [115 - 11,5 (103,5), 115 + 11,5 (126,5)] is all equal - assert_eq!(103u32.tcmp(&b, e), Ordering::Equal); - assert_eq!(104u32.tcmp(&b, e), Ordering::Equal); - assert_eq!(115u32.tcmp(&b, e), Ordering::Equal); - assert_eq!(120u32.tcmp(&b, e), Ordering::Equal); - assert_eq!(126u32.tcmp(&b, e), Ordering::Equal); - assert_eq!(127u32.tcmp(&b, e), Ordering::Equal); - - assert_eq!(128u32.tcmp(&b, e), Ordering::Greater); - assert_eq!(102u32.tcmp(&b, e), Ordering::Less); + assert_eq!((103u32).tcmp(&b, e), Ordering::Equal); + assert_eq!((104u32).tcmp(&b, e), Ordering::Equal); + assert_eq!((115u32).tcmp(&b, e), Ordering::Equal); + assert_eq!((120u32).tcmp(&b, e), Ordering::Equal); + assert_eq!((126u32).tcmp(&b, e), Ordering::Equal); + assert_eq!((127u32).tcmp(&b, e), Ordering::Equal); + + assert_eq!((128u32).tcmp(&b, e), Ordering::Greater); + assert_eq!((102u32).tcmp(&b, e), Ordering::Less); } #[test] @@ -459,15 +541,15 @@ mod threshold_compare_tests { let e = Perbill::from_parts(100) * b; // [115 - 11,5 (103,5), 115 + 11,5 (126,5)] is all equal - assert_eq!(103u32.tcmp(&b, e), 103u32.cmp(&b)); - assert_eq!(104u32.tcmp(&b, e), 104u32.cmp(&b)); - assert_eq!(115u32.tcmp(&b, e), 115u32.cmp(&b)); - assert_eq!(120u32.tcmp(&b, e), 120u32.cmp(&b)); - assert_eq!(126u32.tcmp(&b, e), 126u32.cmp(&b)); - assert_eq!(127u32.tcmp(&b, e), 127u32.cmp(&b)); - - assert_eq!(128u32.tcmp(&b, e), 128u32.cmp(&b)); - assert_eq!(102u32.tcmp(&b, e), 102u32.cmp(&b)); + assert_eq!((103u32).tcmp(&b, e), (103u32).cmp(&b)); + assert_eq!((104u32).tcmp(&b, e), (104u32).cmp(&b)); + assert_eq!((115u32).tcmp(&b, e), (115u32).cmp(&b)); + assert_eq!((120u32).tcmp(&b, e), (120u32).cmp(&b)); + assert_eq!((126u32).tcmp(&b, e), (126u32).cmp(&b)); + assert_eq!((127u32).tcmp(&b, e), (127u32).cmp(&b)); + + assert_eq!((128u32).tcmp(&b, e), (128u32).cmp(&b)); +
assert_eq!((102u32).tcmp(&b, e), (102u32).cmp(&b)); } #[test] diff --git a/substrate/primitives/arithmetic/src/per_things.rs b/substrate/primitives/arithmetic/src/per_things.rs index 057bfd7bf885..f73dbe30cec1 100644 --- a/substrate/primitives/arithmetic/src/per_things.rs +++ b/substrate/primitives/arithmetic/src/per_things.rs @@ -15,6 +15,42 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! Types that implement [`PerThing`](PerThing) can be used as a floating-point alternative for +//! numbers that operate within the realm of `[0, 1]`. The primary types you may encounter in +//! Substrate would be the following: +//! - [`Percent`](Percent) - parts of one hundred. +//! - [`Permill`](Permill) - parts of a million. +//! - [`Perbill`](Perbill) - parts of a billion. +//! +//! In use, you may see them as follows: +//! +//! > **[`Perbill`](Perbill), parts of a billion** +#![doc = docify::embed!("./src/lib.rs", perbill_example)] +//! > **[`Percent`](Percent), parts of a hundred** +#![doc = docify::embed!("./src/lib.rs", percent_example)] +//! +//! Note that `Percent` is represented as a _rounded down_, fixed point +//! number (see the example above). Unlike primitive types, types that implement +//! [`PerThing`](PerThing) will also not overflow, and are therefore safe to use. +//! They adopt the same behavior that a saturated calculation would provide, meaning that if one is +//! to go over "100%", it wouldn't overflow, but simply stop at the upper or lower bound. +//! +//! For use cases which require precision beyond the range of `[0, 1]`, there are fixed-point types +//! which can be used. +//! +//! Each of these can be used to construct and represent ratios within our runtime. +//! You will find types like [`Perbill`](Perbill) being used often in pallet +//! development. `pallet_referenda` is a good example of a pallet which makes good use of fixed +//! point arithmetic, as it relies on representing various curves and thresholds relating to +//! governance. +//! +//! #### Fixed Point Arithmetic with [`PerThing`](PerThing) +//! +//! As stated, one can also perform mathematics using these types directly. For example, finding the +//!
percentage of a particular item: + +#![doc = docify::embed!("./src/lib.rs", percent_mult)] + #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/substrate/primitives/authority-discovery/Cargo.toml b/substrate/primitives/authority-discovery/Cargo.toml index 8ee8bb94ed97..88d93f400596 100644 --- a/substrate/primitives/authority-discovery/Cargo.toml +++ b/substrate/primitives/authority-discovery/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false } sp-application-crypto = { path = "../application-crypto", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml index 9d13d627eebc..e716b61bfeb1 100644 --- a/substrate/primitives/blockchain/Cargo.toml +++ b/substrate/primitives/blockchain/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } parking_lot = "0.12.1" schnellru = "0.2.1" diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs index 8208f9128e71..7a09865f858d 100644 --- a/substrate/primitives/blockchain/src/backend.rs +++ b/substrate/primitives/blockchain/src/backend.rs @@ -187,7 +187,7 @@ pub trait Backend: /// a block with the given `base_hash`. /// /// The search space is always limited to blocks which are in the finalized - /// chain or descendents of it. + /// chain or descendants of it. /// /// Returns `Ok(None)` if `base_hash` is not found in search space. // TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444) diff --git a/substrate/primitives/blockchain/src/error.rs b/substrate/primitives/blockchain/src/error.rs index 9c4181de8189..45fab11a4583 100644 --- a/substrate/primitives/blockchain/src/error.rs +++ b/substrate/primitives/blockchain/src/error.rs @@ -32,7 +32,7 @@ pub enum ApplyExtrinsicFailed { /// The transaction cannot be included into the current block. /// /// This doesn't necessary mean that the transaction itself is invalid, but it might be just - /// unappliable onto the current block. + /// unapplicable onto the current block. #[error("Extrinsic is not valid: {0:?}")] Validity(#[from] TransactionValidityError), diff --git a/substrate/primitives/blockchain/src/header_metadata.rs b/substrate/primitives/blockchain/src/header_metadata.rs index 08b3c9ab3dfb..ccd640c0567a 100644 --- a/substrate/primitives/blockchain/src/header_metadata.rs +++ b/substrate/primitives/blockchain/src/header_metadata.rs @@ -178,7 +178,7 @@ pub struct TreeRoute { impl TreeRoute { /// Creates a new `TreeRoute`. /// - /// To preserve the structure safety invariats it is required that `pivot < route.len()`. + /// To preserve the structure safety invariants it is required that `pivot < route.len()`. 
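Stepping back to the `per_things.rs` documentation added just above: the saturating behavior it describes is easy to demonstrate. A self-contained illustration, assuming the `sp-arithmetic` crate as a dependency:

```rust
// PerThing types clamp at their bounds instead of overflowing.
use sp_arithmetic::{traits::Saturating, PerThing, Percent};

fn main() {
    let a = Percent::from_percent(70);
    let b = Percent::from_percent(60);

    // 70% + 60% saturates at 100% rather than wrapping around.
    assert_eq!(a.saturating_add(b), Percent::one());
    // 60% - 70% saturates at 0% rather than underflowing.
    assert_eq!(b.saturating_sub(a), Percent::zero());
}
```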
pub fn new(route: Vec>, pivot: usize) -> Result { if pivot < route.len() { Ok(TreeRoute { route, pivot }) @@ -212,7 +212,7 @@ impl TreeRoute { ) } - /// Get a slice of enacted blocks (descendents of the common ancestor) + /// Get a slice of enacted blocks (descendants of the common ancestor) pub fn enacted(&self) -> &[HashAndNumber] { &self.route[self.pivot + 1..] } diff --git a/substrate/primitives/consensus/aura/Cargo.toml b/substrate/primitives/consensus/aura/Cargo.toml index 0cedc59ea8fb..b689c84f158c 100644 --- a/substrate/primitives/consensus/aura/Cargo.toml +++ b/substrate/primitives/consensus/aura/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } sp-consensus-slots = { path = "../slots", default-features = false } diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index 724b9fd3e289..2420f48b1f4a 100644 --- a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } diff --git a/substrate/primitives/consensus/babe/src/lib.rs b/substrate/primitives/consensus/babe/src/lib.rs index 6eb75b270a02..ee07da6829f5 100644 --- a/substrate/primitives/consensus/babe/src/lib.rs +++ b/substrate/primitives/consensus/babe/src/lib.rs @@ -266,7 +266,7 @@ impl Default for BabeEpochConfiguration { } /// Verifies the equivocation proof by making sure that: both headers have -/// different hashes, are targetting the same slot, and have valid signatures by +/// different hashes, are targeting the same slot, and have valid signatures by /// the same authority. pub fn check_equivocation_proof(proof: EquivocationProof) -> bool where @@ -298,7 +298,7 @@ where let first_pre_digest = find_pre_digest(&proof.first_header)?; let second_pre_digest = find_pre_digest(&proof.second_header)?; - // both headers must be targetting the same slot and it must + // both headers must be targeting the same slot and it must // be the same as the one in the proof. 
if proof.slot != first_pre_digest.slot() || first_pre_digest.slot() != second_pre_digest.slot() diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index fbcc6e0c1048..a16d943b9146 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } @@ -26,7 +26,7 @@ sp-io = { path = "../../io", default-features = false } sp-mmr-primitives = { path = "../../merkle-mountain-range", default-features = false } sp-runtime = { path = "../../runtime", default-features = false } sp-keystore = { path = "../../keystore", default-features = false } -strum = { version = "0.24.1", features = ["derive"], default-features = false } +strum = { version = "0.26.2", features = ["derive"], default-features = false } lazy_static = { version = "1.4.0", optional = true } [dev-dependencies] diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs index 52210d6e74f6..9f7d8a3382c8 100644 --- a/substrate/primitives/consensus/beefy/src/lib.rs +++ b/substrate/primitives/consensus/beefy/src/lib.rs @@ -201,7 +201,7 @@ pub mod ecdsa_bls_crypto { fn verify(&self, signature: &::Signature, msg: &[u8]) -> bool { // We can not simply call // `EcdsaBlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref())` - // because that invokes ECDSA default verification which perfoms Blake2b hash + // because that invokes ECDSA default verification which performs Blake2b hash // which we don't want. This is because ECDSA signatures are meant to be verified // on Ethereum network where Keccak hasher is significantly cheaper than Blake2b. // See Figure 3 of [OnSc21](https://www.scitepress.org/Papers/2021/106066/106066.pdf) diff --git a/substrate/primitives/consensus/beefy/src/mmr.rs b/substrate/primitives/consensus/beefy/src/mmr.rs index 4c3b477785a1..34bc9352b3a7 100644 --- a/substrate/primitives/consensus/beefy/src/mmr.rs +++ b/substrate/primitives/consensus/beefy/src/mmr.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! BEEFY + MMR utilties. +//! BEEFY + MMR utilities. //! //! While BEEFY can be used completely independently as an additional consensus gadget, //! it is designed around a main use case of bridging standalone networks together. @@ -78,7 +78,7 @@ pub struct MmrLeaf { /// /// Given that adding new struct elements in SCALE is backward compatible (i.e. old format can be /// still decoded, the new fields will simply be ignored). We expect the major version to be bumped -/// very rarely (hopefuly never). +/// very rarely (hopefully never). 
#[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] pub struct MmrLeafVersion(u8); impl MmrLeafVersion { diff --git a/substrate/primitives/consensus/beefy/src/payload.rs b/substrate/primitives/consensus/beefy/src/payload.rs index 14760eb0754d..96b329d19e2d 100644 --- a/substrate/primitives/consensus/beefy/src/payload.rs +++ b/substrate/primitives/consensus/beefy/src/payload.rs @@ -44,7 +44,7 @@ pub mod known_payloads { pub struct Payload(Vec<(BeefyPayloadId, Vec)>); impl Payload { - /// Construct a new payload given an initial vallue + /// Construct a new payload given an initial value pub fn from_single_entry(id: BeefyPayloadId, value: Vec) -> Self { Self(vec![(id, value)]) } diff --git a/substrate/primitives/consensus/common/Cargo.toml b/substrate/primitives/consensus/common/Cargo.toml index 048e31b0265f..90aeadd5055e 100644 --- a/substrate/primitives/consensus/common/Cargo.toml +++ b/substrate/primitives/consensus/common/Cargo.toml @@ -17,8 +17,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" -futures = { version = "0.3.21", features = ["thread-pool"] } +async-trait = "0.1.79" +futures = { version = "0.3.30", features = ["thread-pool"] } log = { workspace = true, default-features = true } thiserror = { workspace = true } sp-core = { path = "../../core" } @@ -27,7 +27,7 @@ sp-runtime = { path = "../../runtime" } sp-state-machine = { path = "../../state-machine" } [dev-dependencies] -futures = "0.3.21" +futures = "0.3.30" sp-test-primitives = { path = "../../test-primitives" } [features] diff --git a/substrate/primitives/consensus/common/src/lib.rs b/substrate/primitives/consensus/common/src/lib.rs index 6505d005deb8..01d3b7a24f9c 100644 --- a/substrate/primitives/consensus/common/src/lib.rs +++ b/substrate/primitives/consensus/common/src/lib.rs @@ -182,7 +182,7 @@ pub trait Proposer { + Send + Unpin + 'static; - /// The supported proof recording by the implementator of this trait. See [`ProofRecording`] + /// The supported proof recording by the implementor of this trait. See [`ProofRecording`] /// for more information. type ProofRecording: self::ProofRecording + Send + Sync + 'static; /// The proof type used by [`Self::ProofRecording`]. 
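Returning to the whitelist benchmarking hunk earlier in this diff: it ports the pallet from the v1 `benchmarks!` macro to the v2 attribute macros. Stripped of the whitelist specifics, a v2 benchmark has the following general shape; this is a sketch, and `do_something`, `SomeStorage`, and the `Linear` bounds are placeholders:

```rust
use frame_benchmarking::v2::*;
use frame_system::RawOrigin;

#[benchmarks]
mod benchmarks {
    use super::*;

    #[benchmark]
    fn do_something(n: Linear<1, 100>) -> Result<(), BenchmarkError> {
        // Setup runs before the measured call.
        let caller: T::AccountId = whitelisted_caller();

        // `_` expands to the extrinsic named after this function; only this
        // call is measured.
        #[extrinsic_call]
        _(RawOrigin::Signed(caller), n);

        // Everything after the call is verification, as the explicit
        // `verify` block was in v1. `SomeStorage` is a hypothetical item.
        ensure!(SomeStorage::<T>::get() == n, "extrinsic had no effect");
        Ok(())
    }

    impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
}
```

Returning a `Result` lets setup code bail out with `BenchmarkError::Weightless`, as the whitelist benchmarks do when no successful origin exists.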
diff --git a/substrate/primitives/consensus/grandpa/Cargo.toml b/substrate/primitives/consensus/grandpa/Cargo.toml index 1f2da55c5a16..6c228383d003 100644 --- a/substrate/primitives/consensus/grandpa/Cargo.toml +++ b/substrate/primitives/consensus/grandpa/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } grandpa = { package = "finality-grandpa", version = "0.16.2", default-features = false, features = ["derive-codec"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 085709d4c8b5..07304ed9b240 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false, features = ["bandersnatch-experimental"] } diff --git a/substrate/primitives/consensus/sassafras/src/ticket.rs b/substrate/primitives/consensus/sassafras/src/ticket.rs index dc0a61990d3e..345de99be28d 100644 --- a/substrate/primitives/consensus/sassafras/src/ticket.rs +++ b/substrate/primitives/consensus/sassafras/src/ticket.rs @@ -115,7 +115,7 @@ mod tests { let threshold = ticket_id_threshold(redundancy, slots, attempts, validators); let threshold = threshold as f64 / TicketId::MAX as f64; - // We expect that the total number of tickets allowed to be submited + // We expect that the total number of tickets allowed to be submitted // is slots*redundancy let avt = ((attempts * validators) as f64 * threshold) as u32; assert_eq!(avt, slots * redundancy); diff --git a/substrate/primitives/consensus/sassafras/src/vrf.rs b/substrate/primitives/consensus/sassafras/src/vrf.rs index 815edb5eb661..537cff52ab6f 100644 --- a/substrate/primitives/consensus/sassafras/src/vrf.rs +++ b/substrate/primitives/consensus/sassafras/src/vrf.rs @@ -101,7 +101,7 @@ pub fn make_ticket_id(input: &VrfInput, pre_output: &VrfPreOutput) -> TicketId { u128::from_le_bytes(bytes) } -/// Make revealed key seed from a given VRF input and pre-ouput. +/// Make revealed key seed from a given VRF input and pre-output. /// /// Input should have been obtained via [`revealed_key_input`]. 
/// Pre-output should have been obtained from the input directly using the vrf diff --git a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml index 94c02dba203d..a8b129006179 100644 --- a/substrate/primitives/consensus/slots/Cargo.toml +++ b/substrate/primitives/consensus/slots/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-timestamp = { path = "../../timestamp", default-features = false } diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 090249bc8a83..197eaa11b978 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -37,7 +37,7 @@ ss58-registry = { version = "1.34.0", default-features = false } sp-debug-derive = { path = "../debug-derive", default-features = false } sp-storage = { path = "../storage", default-features = false } sp-externalities = { path = "../externalities", optional = true } -futures = { version = "0.3.21", optional = true } +futures = { version = "0.3.30", optional = true } dyn-clonable = { version = "0.9.0", optional = true } thiserror = { optional = true, workspace = true } tracing = { version = "0.1.29", optional = true } diff --git a/substrate/primitives/core/src/address_uri.rs b/substrate/primitives/core/src/address_uri.rs index e32e7764c00e..bbe31b7553bd 100644 --- a/substrate/primitives/core/src/address_uri.rs +++ b/substrate/primitives/core/src/address_uri.rs @@ -85,7 +85,7 @@ impl Error { /// Complementary error information. /// -/// Strucutre contains complementary information about parsing address URI string. +/// Structure contains complementary information about parsing address URI string. /// String contains a copy of an original URI string, 0-based integer indicates position of invalid /// character. #[derive(Debug, PartialEq, Eq, Clone)] diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs index fd0b87e1deb9..61ba8d0b94e6 100644 --- a/substrate/primitives/core/src/bandersnatch.rs +++ b/substrate/primitives/core/src/bandersnatch.rs @@ -20,23 +20,19 @@ //! //! The primitive can operate both as a regular VRF or as an anonymized Ring VRF. 
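The bandersnatch hunk below, and the `bls.rs` hunk after it, replace hundreds of lines of hand-rolled `Public` and `Signature` types with the shared, tag-parameterized wrappers from the new `crypto_bytes` module. The pattern, in simplified sketch form (this stand-in omits the codec, serde, and SS58 plumbing the real `CryptoBytes` carries):

```rust
use core::marker::PhantomData;

// Shared byte-array wrapper: the const size plus a zero-sized tag make each
// scheme's key type distinct even when the byte lengths coincide.
pub struct CryptoBytes<const N: usize, Tag>(pub [u8; N], PhantomData<fn() -> Tag>);

impl<const N: usize, Tag> CryptoBytes<N, Tag> {
    pub fn unchecked_from(raw: [u8; N]) -> Self {
        Self(raw, PhantomData)
    }
}

impl<const N: usize, Tag> AsRef<[u8]> for CryptoBytes<N, Tag> {
    fn as_ref(&self) -> &[u8] {
        &self.0[..]
    }
}

impl<const N: usize, Tag> TryFrom<&[u8]> for CryptoBytes<N, Tag> {
    type Error = ();
    fn try_from(data: &[u8]) -> Result<Self, ()> {
        // The length check previously duplicated in every scheme lives here.
        let raw: [u8; N] = data.try_into().map_err(|_| ())?;
        Ok(Self::unchecked_from(raw))
    }
}

// A scheme then only declares a tag and two aliases, mirroring what the
// bandersnatch hunk does via the `PublicBytes`/`SignatureBytes` wrappers.
pub struct ExampleTag; // hypothetical scheme tag
pub type ExamplePublic = CryptoBytes<33, ExampleTag>;
pub type ExampleSignature = CryptoBytes<65, ExampleTag>;
```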
-#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; +#[cfg(feature = "full_crypto")] +use crate::crypto::VrfSecret; use crate::crypto::{ - ByteArray, CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, Pair as TraitPair, - Public as TraitPublic, SecretStringError, UncheckedFrom, VrfPublic, VrfSecret, + ByteArray, CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, + PublicBytes, SecretStringError, SignatureBytes, UncheckedFrom, VrfPublic, }; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use alloc::{format, string::String}; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use bandersnatch_vrfs::{CanonicalSerialize, SecretKey}; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use scale_info::TypeInfo; +#[cfg(not(feature = "std"))] use alloc::{vec, vec::Vec}; -use sp_runtime_interface::pass_by::PassByInner; /// Identifier used to match public keys against bandersnatch-vrf keys. pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"band"); @@ -44,166 +40,38 @@ pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"band"); /// Context used to produce a plain signature without any VRF input/output. pub const SIGNING_CTX: &[u8] = b"BandersnatchSigningContext"; -const SEED_SERIALIZED_SIZE: usize = 32; +/// The byte length of secret key seed. +pub const SEED_SERIALIZED_SIZE: usize = 32; -const PUBLIC_SERIALIZED_SIZE: usize = 33; -const SIGNATURE_SERIALIZED_SIZE: usize = 65; -const PREOUT_SERIALIZED_SIZE: usize = 33; +/// The byte length of serialized public key. +pub const PUBLIC_SERIALIZED_SIZE: usize = 33; -/// Bandersnatch public key. -#[derive( - Clone, - Copy, - PartialEq, - Eq, - PartialOrd, - Ord, - Encode, - Decode, - PassByInner, - MaxEncodedLen, - TypeInfo, - Hash, -)] -pub struct Public(pub [u8; PUBLIC_SERIALIZED_SIZE]); - -impl UncheckedFrom<[u8; PUBLIC_SERIALIZED_SIZE]> for Public { - fn unchecked_from(raw: [u8; PUBLIC_SERIALIZED_SIZE]) -> Self { - Public(raw) - } -} - -impl AsRef<[u8; PUBLIC_SERIALIZED_SIZE]> for Public { - fn as_ref(&self) -> &[u8; PUBLIC_SERIALIZED_SIZE] { - &self.0 - } -} - -impl AsRef<[u8]> for Public { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } -} - -impl AsMut<[u8]> for Public { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } -} - -impl TryFrom<&[u8]> for Public { - type Error = (); +/// The byte length of serialized signature. +pub const SIGNATURE_SERIALIZED_SIZE: usize = 65; - fn try_from(data: &[u8]) -> Result { - if data.len() != PUBLIC_SERIALIZED_SIZE { - return Err(()) - } - let mut r = [0u8; PUBLIC_SERIALIZED_SIZE]; - r.copy_from_slice(data); - Ok(Self::unchecked_from(r)) - } -} +/// The byte length of serialized pre-output. +pub const PREOUT_SERIALIZED_SIZE: usize = 33; -impl ByteArray for Public { - const LEN: usize = PUBLIC_SERIALIZED_SIZE; -} +#[doc(hidden)] +pub struct BandersnatchTag; -impl TraitPublic for Public {} +/// Bandersnatch public key. 
+pub type Public = PublicBytes; impl CryptoType for Public { type Pair = Pair; } -impl Derive for Public {} - -impl core::fmt::Debug for Public { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.as_ref()), &s[0..8]) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Public { - fn deserialize>(deserializer: D) -> Result { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - /// Bandersnatch signature. /// /// The signature is created via the [`VrfSecret::vrf_sign`] using [`SIGNING_CTX`] as transcript /// `label`. -#[derive( - Clone, Copy, PartialEq, Eq, Encode, Decode, PassByInner, MaxEncodedLen, TypeInfo, Hash, -)] -pub struct Signature([u8; SIGNATURE_SERIALIZED_SIZE]); - -impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_SIZE]> for Signature { - fn unchecked_from(raw: [u8; SIGNATURE_SERIALIZED_SIZE]) -> Self { - Signature(raw) - } -} - -impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } -} - -impl AsMut<[u8]> for Signature { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } -} - -impl TryFrom<&[u8]> for Signature { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() != SIGNATURE_SERIALIZED_SIZE { - return Err(()) - } - let mut r = [0u8; SIGNATURE_SERIALIZED_SIZE]; - r.copy_from_slice(data); - Ok(Self::unchecked_from(r)) - } -} - -impl ByteArray for Signature { - const LEN: usize = SIGNATURE_SERIALIZED_SIZE; -} +pub type Signature = SignatureBytes; impl CryptoType for Signature { type Pair = Pair; } -impl core::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { - Ok(()) - } -} - /// The raw secret seed, which can be used to reconstruct the secret [`Pair`]. type Seed = [u8; SEED_SERIALIZED_SIZE]; @@ -382,7 +250,7 @@ pub mod vrf { /// /// The `transcript` summarizes a set of messages which are defining a particular /// protocol by automating the Fiat-Shamir transform for challenge generation. - /// A good explaination of the topic can be found in Merlin [docs](https://merlin.cool/) + /// A good explanation of the topic can be found in Merlin [docs](https://merlin.cool/) /// /// The `inputs` is a sequence of [`VrfInput`]s which, during the signing procedure, are /// first transformed to [`VrfPreOutput`]s. Both inputs and pre-outputs are then appended to @@ -540,8 +408,7 @@ pub mod vrf { thin_signature.preouts.into_iter().map(VrfPreOutput).collect(); let pre_outputs = VrfIosVec::truncate_from(pre_outputs); - let mut signature = - VrfSignature { signature: Signature([0; SIGNATURE_SERIALIZED_SIZE]), pre_outputs }; + let mut signature = VrfSignature { signature: Signature::default(), pre_outputs }; thin_signature .proof @@ -580,7 +447,7 @@ pub mod vrf { // This is another hack used because backend signature type is generic over // the number of ios. 
let Ok(proof) = ThinVrfSignature::<0>::deserialize_compressed_unchecked( - signature.signature.as_ref(), + signature.signature.as_slice(), ) .map(|s| s.proof) else { return false diff --git a/substrate/primitives/core/src/bls.rs b/substrate/primitives/core/src/bls.rs index c3e8660f655a..a86f67844da2 100644 --- a/substrate/primitives/core/src/bls.rs +++ b/substrate/primitives/core/src/bls.rs @@ -23,31 +23,18 @@ //! Chaum-Pedersen proof uses the same hash-to-field specified in RFC 9380 for the field of the BLS //! curve. -#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; use crate::crypto::{ - ByteArray, CryptoType, Derive, DeriveError, DeriveJunction, Pair as TraitPair, - Public as TraitPublic, SecretStringError, UncheckedFrom, + CryptoType, DeriveError, DeriveJunction, Pair as TraitPair, PublicBytes, SecretStringError, + SignatureBytes, UncheckedFrom, }; use alloc::vec::Vec; -use codec::{Decode, Encode, MaxEncodedLen}; -use scale_info::TypeInfo; - -#[cfg(all(not(feature = "std"), feature = "serde"))] -use alloc::{format, string::String}; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; - use w3f_bls::{ DoublePublicKey, DoublePublicKeyScheme, DoubleSignature, EngineBLS, Keypair, Message, SecretKey, SerializableToBytes, TinyBLS381, }; -use core::{convert::TryFrom, marker::PhantomData, ops::Deref}; -use sp_runtime_interface::pass_by::{self, PassBy, PassByInner}; - /// BLS-377 specialized types pub mod bls377 { pub use super::{PUBLIC_KEY_SERIALIZED_SIZE, SIGNATURE_SERIALIZED_SIZE}; @@ -57,6 +44,9 @@ pub mod bls377 { /// An identifier used to match public keys against BLS12-377 keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"bls7"); + #[doc(hidden)] + pub type Bls377Tag = TinyBLS377; + /// BLS12-377 key pair. pub type Pair = super::Pair; /// BLS12-377 public key. @@ -113,296 +103,18 @@ pub const SIGNATURE_SERIALIZED_SIZE: usize = /// will need it later (such as for HDKD). type Seed = [u8; SECRET_KEY_SERIALIZED_SIZE]; -/// A public key. -#[derive(Copy, Encode, Decode, MaxEncodedLen, TypeInfo)] -#[scale_info(skip_type_params(T))] -pub struct Public { - inner: [u8; PUBLIC_KEY_SERIALIZED_SIZE], - _phantom: PhantomData T>, -} - -impl Clone for Public { - fn clone(&self) -> Self { - Self { inner: self.inner, _phantom: PhantomData } - } -} - -impl PartialEq for Public { - fn eq(&self, other: &Self) -> bool { - self.inner == other.inner - } -} - -impl Eq for Public {} - -impl PartialOrd for Public { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Public { - fn cmp(&self, other: &Self) -> core::cmp::Ordering { - self.inner.cmp(&other.inner) - } -} - -impl core::hash::Hash for Public { - fn hash(&self, state: &mut H) { - self.inner.hash(state) - } -} - -impl ByteArray for Public { - const LEN: usize = PUBLIC_KEY_SERIALIZED_SIZE; -} - -impl PassByInner for Public { - type Inner = [u8; PUBLIC_KEY_SERIALIZED_SIZE]; - - fn into_inner(self) -> Self::Inner { - self.inner - } - - fn inner(&self) -> &Self::Inner { - &self.inner - } - - fn from_inner(inner: Self::Inner) -> Self { - Self { inner, _phantom: PhantomData } - } -} - -impl PassBy for Public { - type PassBy = pass_by::Inner; -} - -impl AsRef<[u8; PUBLIC_KEY_SERIALIZED_SIZE]> for Public { - fn as_ref(&self) -> &[u8; PUBLIC_KEY_SERIALIZED_SIZE] { - &self.inner - } -} - -impl AsRef<[u8]> for Public { - fn as_ref(&self) -> &[u8] { - &self.inner[..] - } -} - -impl AsMut<[u8]> for Public { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.inner[..] 
- } -} - -impl Deref for Public { - type Target = [u8]; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl TryFrom<&[u8]> for Public { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() != PUBLIC_KEY_SERIALIZED_SIZE { - return Err(()) - } - let mut r = [0u8; PUBLIC_KEY_SERIALIZED_SIZE]; - r.copy_from_slice(data); - Ok(Self::unchecked_from(r)) - } -} - -impl From> for [u8; PUBLIC_KEY_SERIALIZED_SIZE] { - fn from(x: Public) -> Self { - x.inner - } -} - -impl From> for Public { - fn from(x: Pair) -> Self { - x.public() - } -} - -impl UncheckedFrom<[u8; PUBLIC_KEY_SERIALIZED_SIZE]> for Public { - fn unchecked_from(data: [u8; PUBLIC_KEY_SERIALIZED_SIZE]) -> Self { - Public { inner: data, _phantom: PhantomData } - } -} - -#[cfg(feature = "std")] -impl std::str::FromStr for Public { - type Err = crate::crypto::PublicError; - - fn from_str(s: &str) -> Result { - Self::from_ss58check(s) - } -} - -#[cfg(feature = "std")] -impl std::fmt::Display for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } -} - -#[cfg(feature = "std")] -impl core::fmt::Debug for Public { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.inner), &s[0..8]) - } -} - -#[cfg(not(feature = "std"))] -impl core::fmt::Debug for Public { - fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de, T: BlsBound> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl TraitPublic for Public {} +#[doc(hidden)] +pub struct BlsTag; -impl Derive for Public {} +/// A public key. +pub type Public = PublicBytes; impl CryptoType for Public { type Pair = Pair; } /// A generic BLS signature. 
-#[derive(Copy, Encode, Decode, MaxEncodedLen, TypeInfo)] -#[scale_info(skip_type_params(T))] -pub struct Signature { - inner: [u8; SIGNATURE_SERIALIZED_SIZE], - _phantom: PhantomData T>, -} - -impl Clone for Signature { - fn clone(&self) -> Self { - Self { inner: self.inner, _phantom: PhantomData } - } -} - -impl PartialEq for Signature { - fn eq(&self, other: &Self) -> bool { - self.inner == other.inner - } -} - -impl Eq for Signature {} - -impl core::hash::Hash for Signature { - fn hash(&self, state: &mut H) { - self.inner.hash(state) - } -} - -impl ByteArray for Signature { - const LEN: usize = SIGNATURE_SERIALIZED_SIZE; -} - -impl TryFrom<&[u8]> for Signature { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() != SIGNATURE_SERIALIZED_SIZE { - return Err(()) - } - let mut inner = [0u8; SIGNATURE_SERIALIZED_SIZE]; - inner.copy_from_slice(data); - Ok(Signature::unchecked_from(inner)) - } -} - -#[cfg(feature = "serde")] -impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de, T> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl From> for [u8; SIGNATURE_SERIALIZED_SIZE] { - fn from(signature: Signature) -> [u8; SIGNATURE_SERIALIZED_SIZE] { - signature.inner - } -} - -impl AsRef<[u8; SIGNATURE_SERIALIZED_SIZE]> for Signature { - fn as_ref(&self) -> &[u8; SIGNATURE_SERIALIZED_SIZE] { - &self.inner - } -} - -impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { - &self.inner[..] - } -} - -impl AsMut<[u8]> for Signature { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.inner[..] - } -} - -impl core::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.inner)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { - Ok(()) - } -} - -impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_SIZE]> for Signature { - fn unchecked_from(data: [u8; SIGNATURE_SERIALIZED_SIZE]) -> Self { - Signature { inner: data, _phantom: PhantomData } - } -} +pub type Signature = SignatureBytes; impl CryptoType for Signature { type Pair = Pair; @@ -423,6 +135,7 @@ trait HardJunctionId { /// Derive a single hard junction. 
fn derive_hard_junction(secret_seed: &Seed, cc: &[u8; 32]) -> Seed { + use codec::Encode; (T::ID, secret_seed, cc).using_encoded(sp_crypto_hashing::blake2_256) } @@ -489,7 +202,7 @@ impl TraitPair for Pair { Err(_) => return false, }; - let sig_array = match sig.inner[..].try_into() { + let sig_array = match sig.0[..].try_into() { Ok(s) => s, Err(_) => return false, }; @@ -517,8 +230,10 @@ impl CryptoType for Pair { // Test set exercising the BLS12-377 implementation #[cfg(test)] -mod test { +mod tests { use super::*; + #[cfg(feature = "serde")] + use crate::crypto::Ss58Codec; use crate::crypto::DEV_PHRASE; use bls377::{Pair, Signature}; diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs index c42e47a8d260..6e4b3bb63306 100644 --- a/substrate/primitives/core/src/crypto.rs +++ b/substrate/primitives/core/src/crypto.rs @@ -37,7 +37,10 @@ pub use ss58_registry::{from_known_address_format, Ss58AddressFormat, Ss58Addres /// Trait to zeroize a memory buffer. pub use zeroize::Zeroize; -pub use crate::address_uri::{AddressUri, Error as AddressUriError}; +pub use crate::{ + address_uri::{AddressUri, Error as AddressUriError}, + crypto_bytes::{CryptoBytes, PublicBytes, SignatureBytes}, +}; /// The root phrase for our publicly known keys. pub const DEV_PHRASE: &str = @@ -480,8 +483,11 @@ pub trait ByteArray: AsRef<[u8]> + AsMut<[u8]> + for<'a> TryFrom<&'a [u8], Error } } -/// Trait suitable for typical cryptographic key public type. -pub trait Public: CryptoType + ByteArray + Derive + PartialEq + Eq + Clone + Send + Sync {} +/// Trait suitable for cryptographic public keys. +pub trait Public: CryptoType + ByteArray + PartialEq + Eq + Clone + Send + Sync + Derive {} + +/// Trait suitable for cryptographic signatures. +pub trait Signature: CryptoType + ByteArray + PartialEq + Eq + Clone + Send + Sync {} /// An opaque 32-byte cryptographic identifier. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo)] @@ -643,32 +649,11 @@ pub use self::dummy::*; mod dummy { use super::*; - /// Dummy cryptography. Doesn't do anything. - #[derive(Clone, Hash, Default, Eq, PartialEq)] - pub struct Dummy; - - impl AsRef<[u8]> for Dummy { - fn as_ref(&self) -> &[u8] { - &b""[..] - } - } + #[doc(hidden)] + pub struct DummyTag; - impl AsMut<[u8]> for Dummy { - fn as_mut(&mut self) -> &mut [u8] { - unsafe { - #[allow(mutable_transmutes)] - core::mem::transmute::<_, &'static mut [u8]>(&b""[..]) - } - } - } - - impl<'a> TryFrom<&'a [u8]> for Dummy { - type Error = (); - - fn try_from(_: &'a [u8]) -> Result { - Ok(Self) - } - } + /// Dummy cryptography. Doesn't do anything. 
+ pub type Dummy = CryptoBytes<0, DummyTag>; impl CryptoType for Dummy { type Pair = Dummy; @@ -676,21 +661,10 @@ mod dummy { impl Derive for Dummy {} - impl ByteArray for Dummy { - const LEN: usize = 0; - fn from_slice(_: &[u8]) -> Result { - Ok(Self) - } - #[cfg(feature = "std")] - fn to_raw_vec(&self) -> Vec { - vec![] - } - fn as_slice(&self) -> &[u8] { - b"" - } - } impl Public for Dummy {} + impl Signature for Dummy {} + impl Pair for Dummy { type Public = Dummy; type Seed = Dummy; @@ -711,15 +685,15 @@ mod dummy { _: Iter, _: Option, ) -> Result<(Self, Option), DeriveError> { - Ok((Self, None)) + Ok((Self::default(), None)) } fn from_seed_slice(_: &[u8]) -> Result { - Ok(Self) + Ok(Self::default()) } fn sign(&self, _: &[u8]) -> Self::Signature { - Self + Self::default() } fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { @@ -727,11 +701,11 @@ mod dummy { } fn public(&self) -> Self::Public { - Self + Self::default() } fn to_raw_vec(&self) -> Vec { - vec![] + Default::default() } } } @@ -840,7 +814,7 @@ pub trait Pair: CryptoType + Sized { /// The type used to represent a signature. Can be created from a key pair and a message /// and verified with the message and a public key. - type Signature: AsRef<[u8]>; + type Signature: Signature; /// Generate new secure (random) key pair. /// @@ -1213,6 +1187,8 @@ mod tests { use super::*; use crate::DeriveJunction; + struct TestCryptoTag; + #[derive(Clone, Eq, PartialEq, Debug)] enum TestPair { Generated, @@ -1221,59 +1197,33 @@ mod tests { Standard { phrase: String, password: Option, path: Vec }, Seed(Vec), } + impl Default for TestPair { fn default() -> Self { TestPair::Generated } } + impl CryptoType for TestPair { type Pair = Self; } - #[derive(Clone, PartialEq, Eq, Hash, Default)] - struct TestPublic; - impl AsRef<[u8]> for TestPublic { - fn as_ref(&self) -> &[u8] { - &[] - } - } - impl AsMut<[u8]> for TestPublic { - fn as_mut(&mut self) -> &mut [u8] { - &mut [] - } - } - impl<'a> TryFrom<&'a [u8]> for TestPublic { - type Error = (); + type TestPublic = PublicBytes<0, TestCryptoTag>; - fn try_from(data: &'a [u8]) -> Result { - Self::from_slice(data) - } - } impl CryptoType for TestPublic { type Pair = TestPair; } - impl Derive for TestPublic {} - impl ByteArray for TestPublic { - const LEN: usize = 0; - fn from_slice(bytes: &[u8]) -> Result { - if bytes.is_empty() { - Ok(Self) - } else { - Err(()) - } - } - fn as_slice(&self) -> &[u8] { - &[] - } - fn to_raw_vec(&self) -> Vec { - vec![] - } + + type TestSignature = SignatureBytes<0, TestCryptoTag>; + + impl CryptoType for TestSignature { + type Pair = TestPair; } - impl Public for TestPublic {} + impl Pair for TestPair { type Public = TestPublic; type Seed = [u8; 8]; - type Signature = [u8; 0]; + type Signature = TestSignature; fn generate() -> (Self, ::Seed) { (TestPair::Generated, [0u8; 8]) @@ -1322,7 +1272,7 @@ mod tests { } fn sign(&self, _message: &[u8]) -> Self::Signature { - [] + TestSignature::default() } fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { @@ -1330,7 +1280,7 @@ mod tests { } fn public(&self) -> Self::Public { - TestPublic + TestPublic::default() } fn from_seed_slice(seed: &[u8]) -> Result { diff --git a/substrate/primitives/core/src/crypto_bytes.rs b/substrate/primitives/core/src/crypto_bytes.rs new file mode 100644 index 000000000000..ee5f3482f743 --- /dev/null +++ b/substrate/primitives/core/src/crypto_bytes.rs @@ -0,0 +1,379 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Generic byte array which can be specialized with a marker type.
+
+use crate::{
+	crypto::{CryptoType, Derive, FromEntropy, Public, Signature, UncheckedFrom},
+	hash::{H256, H512},
+};
+
+use codec::{Decode, Encode, MaxEncodedLen};
+use core::marker::PhantomData;
+use scale_info::TypeInfo;
+
+use sp_runtime_interface::pass_by::{self, PassBy, PassByInner};
+
+#[cfg(feature = "serde")]
+use crate::crypto::Ss58Codec;
+#[cfg(feature = "serde")]
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
+
+#[cfg(all(not(feature = "std"), feature = "serde"))]
+use sp_std::alloc::{format, string::String};
+
+pub use public_bytes::*;
+pub use signature_bytes::*;
+
+/// Generic byte array holding some crypto-related raw data.
+///
+/// The type is generic over a constant length `N` and a "tag" `T` which
+/// can be used to specialize the byte array without requiring newtypes.
+///
+/// The tag `T` is held in a `PhantomData<fn() -> T>`, a trick allowing
+/// `CryptoBytes` to be `Send` and `Sync` regardless of `T` properties
+/// ([ref](https://doc.rust-lang.org/nomicon/phantom-data.html#table-of-phantomdata-patterns)).
+#[derive(Encode, Decode, MaxEncodedLen)]
+#[repr(transparent)]
+pub struct CryptoBytes<const N: usize, T>(pub [u8; N], PhantomData<fn() -> T>);
+
+impl<const N: usize, T> Copy for CryptoBytes<N, T> {}
+
+impl<const N: usize, T> Clone for CryptoBytes<N, T> {
+	fn clone(&self) -> Self {
+		Self(self.0, PhantomData)
+	}
+}
+
+impl<const N: usize, T> TypeInfo for CryptoBytes<N, T> {
+	type Identity = [u8; N];
+
+	fn type_info() -> scale_info::Type {
+		Self::Identity::type_info()
+	}
+}
+
+impl<const N: usize, T> PartialOrd for CryptoBytes<N, T> {
+	fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
+		self.0.partial_cmp(&other.0)
+	}
+}
+
+impl<const N: usize, T> Ord for CryptoBytes<N, T> {
+	fn cmp(&self, other: &Self) -> core::cmp::Ordering {
+		self.0.cmp(&other.0)
+	}
+}
+
+impl<const N: usize, T> PartialEq for CryptoBytes<N, T> {
+	fn eq(&self, other: &Self) -> bool {
+		self.0.eq(&other.0)
+	}
+}
+
+impl<const N: usize, T> core::hash::Hash for CryptoBytes<N, T> {
+	fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
+		self.0.hash(state)
+	}
+}
+
+impl<const N: usize, T> Eq for CryptoBytes<N, T> {}
+
+impl<const N: usize, T> Default for CryptoBytes<N, T> {
+	fn default() -> Self {
+		Self([0_u8; N], PhantomData)
+	}
+}
+
+impl<const N: usize, T> PassByInner for CryptoBytes<N, T> {
+	type Inner = [u8; N];
+
+	fn into_inner(self) -> Self::Inner {
+		self.0
+	}
+
+	fn inner(&self) -> &Self::Inner {
+		&self.0
+	}
+
+	fn from_inner(inner: Self::Inner) -> Self {
+		Self(inner, PhantomData)
+	}
+}
+
+impl<const N: usize, T> PassBy for CryptoBytes<N, T> {
+	type PassBy = pass_by::Inner<Self, [u8; N]>;
+}
+
+impl<const N: usize, T> AsRef<[u8]> for CryptoBytes<N, T> {
+	fn as_ref(&self) -> &[u8] {
+		&self.0[..]
+	}
+}
+
+impl<const N: usize, T> AsMut<[u8]> for CryptoBytes<N, T> {
+	fn as_mut(&mut self) -> &mut [u8] {
+		&mut self.0[..]
+	}
+}
+
+impl<const N: usize, T> From<CryptoBytes<N, T>> for [u8; N] {
+	fn from(v: CryptoBytes<N, T>) -> [u8; N] {
+		v.0
+	}
+}
+
+impl<const N: usize, T> AsRef<[u8; N]> for CryptoBytes<N, T> {
+	fn as_ref(&self) -> &[u8; N] {
+		&self.0
+	}
+}
+
+impl<const N: usize, T> AsMut<[u8; N]> for CryptoBytes<N, T> {
+	fn as_mut(&mut self) -> &mut [u8; N] {
+		&mut self.0
+	}
+}
+
+impl<const N: usize, T> From<[u8; N]> for CryptoBytes<N, T> {
+	fn from(value: [u8; N]) -> Self {
+		Self::from_raw(value)
+	}
+}
+
+impl<const N: usize, T> TryFrom<&[u8]> for CryptoBytes<N, T> {
+	type Error = ();
+
+	fn try_from(data: &[u8]) -> Result<Self, Self::Error> {
+		if data.len() != N {
+			return Err(())
+		}
+		let mut r = [0u8; N];
+		r.copy_from_slice(data);
+		Ok(Self::from_raw(r))
+	}
+}
+
+impl<const N: usize, T> UncheckedFrom<[u8; N]> for CryptoBytes<N, T> {
+	fn unchecked_from(data: [u8; N]) -> Self {
+		Self::from_raw(data)
+	}
+}
+
+impl<const N: usize, T> core::ops::Deref for CryptoBytes<N, T> {
+	type Target = [u8];
+
+	fn deref(&self) -> &Self::Target {
+		&self.0
+	}
+}
+
+impl<const N: usize, T> CryptoBytes<N, T> {
+	/// Construct from raw array.
+	pub fn from_raw(inner: [u8; N]) -> Self {
+		Self(inner, PhantomData)
+	}
+
+	/// Convert to raw array.
+	pub fn to_raw(self) -> [u8; N] {
+		self.0
+	}
+
+	/// Return a reference to the inner raw array.
+	pub fn as_array_ref(&self) -> &[u8; N] {
+		&self.0
+	}
+}
+
+impl<const N: usize, T> crate::ByteArray for CryptoBytes<N, T> {
+	const LEN: usize = N;
+}
+
+impl<const N: usize, T> FromEntropy for CryptoBytes<N, T> {
+	fn from_entropy(input: &mut impl codec::Input) -> Result<Self, codec::Error> {
+		let mut result = Self::default();
+		input.read(result.as_mut())?;
+		Ok(result)
+	}
+}
+
+impl<T> From<CryptoBytes<32, T>> for H256 {
+	fn from(x: CryptoBytes<32, T>) -> H256 {
+		H256::from(x.0)
+	}
+}
+
+impl<T> From<CryptoBytes<64, T>> for H512 {
+	fn from(x: CryptoBytes<64, T>) -> H512 {
+		H512::from(x.0)
+	}
+}
+
+impl<T> UncheckedFrom<H256> for CryptoBytes<32, T> {
+	fn unchecked_from(x: H256) -> Self {
+		Self::from_h256(x)
+	}
+}
+
+impl<T> CryptoBytes<32, T> {
+	/// A new instance from an H256.
+	pub fn from_h256(x: H256) -> Self {
+		Self::from_raw(x.into())
+	}
+}
+
+impl<T> CryptoBytes<64, T> {
+	/// A new instance from an H512.
+	pub fn from_h512(x: H512) -> Self {
+		Self::from_raw(x.into())
+	}
+}
+
+mod public_bytes {
+	use super::*;
+
+	/// Tag used for generic public key bytes.
+	pub struct PublicTag;
+
+	/// Generic encoded public key.
+ pub type PublicBytes = CryptoBytes; + + impl Derive for PublicBytes where Self: CryptoType {} + + impl Public for PublicBytes where Self: CryptoType {} + + impl sp_std::fmt::Debug for PublicBytes + where + Self: CryptoType, + { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let s = self.to_ss58check(); + write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.as_ref()), &s[0..8]) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } + } + + #[cfg(feature = "std")] + impl std::fmt::Display for PublicBytes + where + Self: CryptoType, + { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.to_ss58check()) + } + } + + #[cfg(feature = "std")] + impl std::str::FromStr for PublicBytes + where + Self: CryptoType, + { + type Err = crate::crypto::PublicError; + + fn from_str(s: &str) -> Result { + Self::from_ss58check(s) + } + } + + #[cfg(feature = "serde")] + impl Serialize for PublicBytes + where + Self: CryptoType, + { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_ss58check()) + } + } + + #[cfg(feature = "serde")] + impl<'de, const N: usize, SubTag> Deserialize<'de> for PublicBytes + where + Self: CryptoType, + { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Self::from_ss58check(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e))) + } + } +} + +mod signature_bytes { + use super::*; + + /// Tag used for generic signature bytes. + pub struct SignatureTag; + + /// Generic encoded signature. + pub type SignatureBytes = CryptoBytes; + + impl Signature for SignatureBytes where Self: CryptoType {} + + #[cfg(feature = "serde")] + impl Serialize for SignatureBytes + where + Self: CryptoType, + { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&array_bytes::bytes2hex("", self)) + } + } + + #[cfg(feature = "serde")] + impl<'de, const N: usize, SubTag> Deserialize<'de> for SignatureBytes + where + Self: CryptoType, + { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e)))?; + Self::try_from(signature_hex.as_ref()) + .map_err(|e| de::Error::custom(format!("{:?}", e))) + } + } + + impl sp_std::fmt::Debug for SignatureBytes + where + Self: CryptoType, + { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "{}", crate::hexdisplay::HexDisplay::from(&&self.0[..])) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } + } +} diff --git a/substrate/primitives/core/src/ecdsa.rs b/substrate/primitives/core/src/ecdsa.rs index 620137509085..da71a52d837d 100644 --- a/substrate/primitives/core/src/ecdsa.rs +++ b/substrate/primitives/core/src/ecdsa.rs @@ -17,15 +17,9 @@ //! Simple ECDSA secp256k1 API. 
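The `CryptoBytes` machinery in `crypto_bytes.rs` above is the heart of this refactor: one byte-array implementation, specialized per scheme through a zero-sized tag. A minimal, self-contained sketch of the pattern (the `Bytes`, `MyPublicTag` and `MySignatureTag` names are hypothetical, for illustration only; the real type additionally derives the codec and scale-info traits shown above):

```rust
use core::marker::PhantomData;

// One generic byte array; the tag parameter only exists at the type level.
pub struct Bytes<const N: usize, Tag>(pub [u8; N], PhantomData<fn() -> Tag>);

impl<const N: usize, Tag> Bytes<N, Tag> {
    // Mirrors `CryptoBytes::from_raw` above.
    pub fn from_raw(inner: [u8; N]) -> Self {
        Self(inner, PhantomData)
    }
}

// Hypothetical zero-sized tags: never instantiated, only used for typing.
pub struct MyPublicTag;
pub struct MySignatureTag;
pub type MyPublic = Bytes<32, MyPublicTag>;
pub type MySignature = Bytes<64, MySignatureTag>;

fn assert_send_sync<T: Send + Sync>() {}

fn main() {
    // `PhantomData<fn() -> Tag>` keeps the wrapper `Send + Sync` no matter
    // what the tag is, which is exactly why the real type uses it.
    assert_send_sync::<MyPublic>();
    assert_send_sync::<MySignature>();
    let _public = MyPublic::from_raw([0u8; 32]);
}
```

Because the tag only appears inside `PhantomData<fn() -> Tag>`, two aliases with different tags are distinct types to the compiler while sharing every impl.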
-use codec::{Decode, Encode, MaxEncodedLen}; -use scale_info::TypeInfo; -use sp_runtime_interface::pass_by::PassByInner; - -#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; use crate::crypto::{ - ByteArray, CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, Pair as TraitPair, - Public as TraitPublic, SecretStringError, UncheckedFrom, + CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, PublicBytes, + SecretStringError, SignatureBytes, }; #[cfg(all(not(feature = "std"), feature = "serde"))] use alloc::{format, string::String}; @@ -41,8 +35,8 @@ use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, Message, PublicKey, SecretKey, SECP256K1, }; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; /// An identifier used to match public keys against ecdsa keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ecds"); @@ -53,45 +47,18 @@ pub const PUBLIC_KEY_SERIALIZED_SIZE: usize = 33; /// The byte length of signature pub const SIGNATURE_SERIALIZED_SIZE: usize = 65; +#[doc(hidden)] +pub struct EcdsaTag; + /// The secret seed. /// /// The raw secret seed, which can be used to create the `Pair`. type Seed = [u8; 32]; /// The ECDSA compressed public key. -#[derive( - Clone, - Copy, - Encode, - Decode, - PassByInner, - MaxEncodedLen, - TypeInfo, - Eq, - PartialEq, - PartialOrd, - Ord, - Hash, -)] -pub struct Public(pub [u8; PUBLIC_KEY_SERIALIZED_SIZE]); - -impl crate::crypto::FromEntropy for Public { - fn from_entropy(input: &mut impl codec::Input) -> Result { - let mut result = Self([0u8; PUBLIC_KEY_SERIALIZED_SIZE]); - input.read(&mut result.0[..])?; - Ok(result) - } -} +pub type Public = PublicBytes; impl Public { - /// A new instance from the given 33-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_raw(data: [u8; PUBLIC_KEY_SERIALIZED_SIZE]) -> Self { - Self(data) - } - /// Create a new instance from the given full public key. /// /// This will convert the full public key into the compressed format. @@ -113,54 +80,18 @@ impl Public { } } -impl ByteArray for Public { - const LEN: usize = PUBLIC_KEY_SERIALIZED_SIZE; -} - -impl TraitPublic for Public {} - -impl Derive for Public {} - -impl AsRef<[u8]> for Public { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } -} - -impl AsMut<[u8]> for Public { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } -} - #[cfg(feature = "std")] impl From for Public { fn from(pubkey: PublicKey) -> Self { - Self(pubkey.serialize()) + Self::from(pubkey.serialize()) } } #[cfg(not(feature = "std"))] impl From for Public { fn from(pubkey: VerifyingKey) -> Self { - Self::unchecked_from( - pubkey.to_sec1_bytes()[..] - .try_into() - .expect("valid key is serializable to [u8,33]. qed."), - ) - } -} - -impl TryFrom<&[u8]> for Public { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() != Self::LEN { - return Err(()) - } - let mut r = [0u8; Self::LEN]; - r.copy_from_slice(data); - Ok(Self::unchecked_from(r)) + Self::try_from(&pubkey.to_sec1_bytes()[..]) + .expect("Valid key is serializable to [u8; 33]. 
qed.") } } @@ -171,176 +102,10 @@ impl From for Public { } } -impl UncheckedFrom<[u8; PUBLIC_KEY_SERIALIZED_SIZE]> for Public { - fn unchecked_from(x: [u8; PUBLIC_KEY_SERIALIZED_SIZE]) -> Self { - Public(x) - } -} - -#[cfg(feature = "std")] -impl std::fmt::Display for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } -} - -impl core::fmt::Debug for Public { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.as_ref()), &s[0..8]) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - /// A signature (a 512-bit value, plus 8 bits for recovery ID). -#[derive(Hash, Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq)] -pub struct Signature(pub [u8; SIGNATURE_SERIALIZED_SIZE]); - -impl ByteArray for Signature { - const LEN: usize = SIGNATURE_SERIALIZED_SIZE; -} - -impl TryFrom<&[u8]> for Signature { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() == SIGNATURE_SERIALIZED_SIZE { - let mut inner = [0u8; SIGNATURE_SERIALIZED_SIZE]; - inner.copy_from_slice(data); - Ok(Signature(inner)) - } else { - Err(()) - } - } -} - -#[cfg(feature = "serde")] -impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl Clone for Signature { - fn clone(&self) -> Self { - let mut r = [0u8; SIGNATURE_SERIALIZED_SIZE]; - r.copy_from_slice(&self.0[..]); - Signature(r) - } -} - -impl Default for Signature { - fn default() -> Self { - Signature([0u8; SIGNATURE_SERIALIZED_SIZE]) - } -} - -impl From for [u8; SIGNATURE_SERIALIZED_SIZE] { - fn from(v: Signature) -> [u8; SIGNATURE_SERIALIZED_SIZE] { - v.0 - } -} - -impl AsRef<[u8; SIGNATURE_SERIALIZED_SIZE]> for Signature { - fn as_ref(&self) -> &[u8; SIGNATURE_SERIALIZED_SIZE] { - &self.0 - } -} - -impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } -} - -impl AsMut<[u8]> for Signature { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] 
-	}
-}
-
-impl core::fmt::Debug for Signature {
-	#[cfg(feature = "std")]
-	fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-		write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0))
-	}
-
-	#[cfg(not(feature = "std"))]
-	fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result {
-		Ok(())
-	}
-}
-
-impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_SIZE]> for Signature {
-	fn unchecked_from(data: [u8; SIGNATURE_SERIALIZED_SIZE]) -> Signature {
-		Signature(data)
-	}
-}
+pub type Signature = SignatureBytes<SIGNATURE_SERIALIZED_SIZE, EcdsaTag>;
 
 impl Signature {
-	/// A new instance from the given 65-byte `data`.
-	///
-	/// NOTE: No checking goes on to ensure this is a real signature. Only use it if
-	/// you are certain that the array actually is a signature. GIGO!
-	pub fn from_raw(data: [u8; SIGNATURE_SERIALIZED_SIZE]) -> Signature {
-		Signature(data)
-	}
-
-	/// A new instance from the given slice that should be 65 bytes long.
-	///
-	/// NOTE: No checking goes on to ensure this is a real signature. Only use it if
-	/// you are certain that the array actually is a signature. GIGO!
-	pub fn from_slice(data: &[u8]) -> Option<Self> {
-		if data.len() != SIGNATURE_SERIALIZED_SIZE {
-			return None
-		}
-		let mut r = [0u8; SIGNATURE_SERIALIZED_SIZE];
-		r.copy_from_slice(data);
-		Some(Signature(r))
-	}
-
 	/// Recover the public key from this signature and a message.
 	pub fn recover<M: AsRef<[u8]>>(&self, message: M) -> Option<Public> {
 		self.recover_prehashed(&sp_crypto_hashing::blake2_256(message.as_ref()))
@@ -352,7 +117,8 @@ impl Signature {
 	{
 		let rid = RecoveryId::from_i32(self.0[64] as i32).ok()?;
 		let sig = RecoverableSignature::from_compact(&self.0[..64], rid).ok()?;
-		let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed");
+		let message =
+			Message::from_digest_slice(message).expect("Message is a 32-byte hash; qed");
 		SECP256K1.recover_ecdsa(&message, &sig).ok().map(Public::from)
 	}
@@ -389,6 +155,7 @@ impl From<RecoverableSignature> for Signature {
 /// Derive a single hard junction.
 fn derive_hard_junction(secret_seed: &Seed, cc: &[u8; 32]) -> Seed {
+	use codec::Encode;
 	("Secp256k1HDKD", secret_seed, cc).using_encoded(sp_crypto_hashing::blake2_256)
 }
@@ -492,15 +259,18 @@ impl Pair {
 	pub fn sign_prehashed(&self, message: &[u8; 32]) -> Signature {
 		#[cfg(feature = "std")]
 		{
-			let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed");
+			let message =
+				Message::from_digest_slice(message).expect("Message is a 32-byte hash; qed");
 			SECP256K1.sign_ecdsa_recoverable(&message, &self.secret).into()
 		}
 
 		#[cfg(not(feature = "std"))]
 		{
+			// Signing fails only if the `message` byte length is less than the field length
+			// (infallible here, as we always pass a fixed 32-byte message).
 			self.secret
 				.sign_prehash_recoverable(message)
-				.expect("signing may not fail (???). qed.")
+				.expect("Signing can't fail when using a 32-byte message hash. qed.")
 				.into()
 		}
 	}
@@ -567,7 +337,7 @@ mod test {
 	use super::*;
 	use crate::crypto::{
 		set_default_ss58_version, PublicError, Ss58AddressFormat, Ss58AddressFormatRegistry,
-		DEV_PHRASE,
+		Ss58Codec, DEV_PHRASE,
 	};
 
 	#[test]
diff --git a/substrate/primitives/core/src/ed25519.rs b/substrate/primitives/core/src/ed25519.rs
index 7f614a6b4ec2..d0598d390324 100644
--- a/substrate/primitives/core/src/ed25519.rs
+++ b/substrate/primitives/core/src/ed25519.rs
@@ -15,58 +15,40 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// tag::description[]
 //! Simple Ed25519 API.
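For reviewers unfamiliar with recoverable ECDSA: `recover_prehashed` above relies on the 65-byte layout where bytes `[0..64]` are the compact `(r, s)` signature and byte `[64]` is the recovery id. A hedged sketch of the same flow (assuming the `secp256k1` crate with the `recovery` and `global-context` features, as this module uses under `std`):

```rust
// Assumes: secp256k1 = { version = "0.28", features = ["recovery", "global-context"] }
use secp256k1::{
    ecdsa::{RecoverableSignature, RecoveryId},
    Message, PublicKey, SECP256K1,
};

// Mirrors `Signature::recover_prehashed` above: split off the recovery id,
// rebuild the recoverable signature, then recover the public key.
fn recover(sig: &[u8; 65], digest: &[u8; 32]) -> Option<PublicKey> {
    let rid = RecoveryId::from_i32(sig[64] as i32).ok()?;
    let sig = RecoverableSignature::from_compact(&sig[..64], rid).ok()?;
    let msg = Message::from_digest_slice(digest).ok()?;
    SECP256K1.recover_ecdsa(&msg, &sig).ok()
}

fn main() {
    // A garbage signature fails to recover instead of panicking.
    assert!(recover(&[0u8; 65], &[0u8; 32]).is_none());
}
```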
-// end::description[] -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - -use crate::{ - crypto::ByteArray, - hash::{H256, H512}, -}; -use codec::{Decode, Encode, MaxEncodedLen}; -use scale_info::TypeInfo; - -#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; use crate::crypto::{ - CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, FromEntropy, Pair as TraitPair, - Public as TraitPublic, SecretStringError, UncheckedFrom, + ByteArray, CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, + PublicBytes, SecretStringError, SignatureBytes, }; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use alloc::{format, string::String}; -use core::ops::Deref; + use ed25519_zebra::{SigningKey, VerificationKey}; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -use sp_runtime_interface::pass_by::PassByInner; + +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; /// An identifier used to match public keys against ed25519 keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ed25"); +/// The byte length of public key +pub const PUBLIC_KEY_SERIALIZED_SIZE: usize = 32; + +/// The byte length of signature +pub const SIGNATURE_SERIALIZED_SIZE: usize = 64; + /// A secret seed. It's not called a "secret key" because ring doesn't expose the secret keys /// of the key pair (yeah, dumb); as such we're forced to remember the seed manually if we /// will need it later (such as for HDKD). type Seed = [u8; 32]; +#[doc(hidden)] +pub struct Ed25519Tag; + /// A public key. -#[derive( - PartialEq, - Eq, - PartialOrd, - Ord, - Clone, - Copy, - Encode, - Decode, - PassByInner, - MaxEncodedLen, - TypeInfo, - Hash, -)] -pub struct Public(pub [u8; 32]); +pub type Public = PublicBytes; + +/// A signature. +pub type Signature = SignatureBytes; /// A key pair. #[derive(Copy, Clone)] @@ -75,295 +57,9 @@ pub struct Pair { secret: SigningKey, } -impl FromEntropy for Public { - fn from_entropy(input: &mut impl codec::Input) -> Result { - let mut result = Self([0u8; 32]); - input.read(&mut result.0[..])?; - Ok(result) - } -} - -impl AsRef<[u8; 32]> for Public { - fn as_ref(&self) -> &[u8; 32] { - &self.0 - } -} - -impl AsRef<[u8]> for Public { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } -} - -impl AsMut<[u8]> for Public { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] 
- } -} - -impl Deref for Public { - type Target = [u8]; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl TryFrom<&[u8]> for Public { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() != Self::LEN { - return Err(()) - } - let mut r = [0u8; Self::LEN]; - r.copy_from_slice(data); - Ok(Self::unchecked_from(r)) - } -} - -impl From for [u8; 32] { - fn from(x: Public) -> Self { - x.0 - } -} - -#[cfg(feature = "full_crypto")] -impl From for Public { - fn from(x: Pair) -> Self { - x.public() - } -} - -impl From for H256 { - fn from(x: Public) -> Self { - x.0.into() - } -} - -#[cfg(feature = "std")] -impl std::str::FromStr for Public { - type Err = crate::crypto::PublicError; - - fn from_str(s: &str) -> Result { - Self::from_ss58check(s) - } -} - -impl UncheckedFrom<[u8; 32]> for Public { - fn unchecked_from(x: [u8; 32]) -> Self { - Public::from_raw(x) - } -} - -impl UncheckedFrom for Public { - fn unchecked_from(x: H256) -> Self { - Public::from_h256(x) - } -} - -#[cfg(feature = "std")] -impl std::fmt::Display for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } -} - -impl core::fmt::Debug for Public { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -/// A signature (a 512-bit value). -#[derive(Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq, Hash)] -pub struct Signature(pub [u8; 64]); - -impl TryFrom<&[u8]> for Signature { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() == 64 { - let mut inner = [0u8; 64]; - inner.copy_from_slice(data); - Ok(Signature(inner)) - } else { - Err(()) - } - } -} - -#[cfg(feature = "serde")] -impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl Clone for Signature { - fn clone(&self) -> Self { - let mut r = [0u8; 64]; - r.copy_from_slice(&self.0[..]); - Signature(r) - } -} - -impl From for H512 { - fn from(v: Signature) -> H512 { - H512::from(v.0) - } -} - -impl From for [u8; 64] { - fn from(v: Signature) -> [u8; 64] { - v.0 - } -} - -impl AsRef<[u8; 64]> for Signature { - fn as_ref(&self) -> &[u8; 64] { - &self.0 - } -} - -impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { - &self.0[..] 
- } -} - -impl AsMut<[u8]> for Signature { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } -} - -impl core::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { - Ok(()) - } -} - -impl UncheckedFrom<[u8; 64]> for Signature { - fn unchecked_from(data: [u8; 64]) -> Signature { - Signature(data) - } -} - -impl Signature { - /// A new instance from the given 64-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_raw(data: [u8; 64]) -> Signature { - Signature(data) - } - - /// A new instance from the given slice that should be 64 bytes long. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_slice(data: &[u8]) -> Option { - if data.len() != 64 { - return None - } - let mut r = [0u8; 64]; - r.copy_from_slice(data); - Some(Signature(r)) - } - - /// A new instance from an H512. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_h512(v: H512) -> Signature { - Signature(v.into()) - } -} - -impl Public { - /// A new instance from the given 32-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_raw(data: [u8; 32]) -> Self { - Public(data) - } - - /// A new instance from an H256. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_h256(x: H256) -> Self { - Public(x.into()) - } - - /// Return a slice filled with raw data. - pub fn as_array_ref(&self) -> &[u8; 32] { - self.as_ref() - } -} - -impl ByteArray for Public { - const LEN: usize = 32; -} - -impl TraitPublic for Public {} - -impl Derive for Public {} - /// Derive a single hard junction. fn derive_hard_junction(secret_seed: &Seed, cc: &[u8; 32]) -> Seed { + use codec::Encode; ("Ed25519HDKD", secret_seed, cc).using_encoded(sp_crypto_hashing::blake2_256) } @@ -401,7 +97,7 @@ impl TraitPair for Pair { /// Get the public key. fn public(&self) -> Public { - Public(self.public.into()) + Public::from_raw(self.public.into()) } /// Sign a message. 
@@ -457,8 +153,10 @@ impl CryptoType for Pair { } #[cfg(test)] -mod test { +mod tests { use super::*; + #[cfg(feature = "serde")] + use crate::crypto::Ss58Codec; use crate::crypto::DEV_PHRASE; #[test] diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index 4fbe3a72053d..5cb0d152cc25 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -60,26 +60,27 @@ pub mod const_hex2array; pub mod crypto; pub mod hexdisplay; pub use paste; - mod address_uri; -#[cfg(feature = "bandersnatch-experimental")] -pub mod bandersnatch; -#[cfg(feature = "bls-experimental")] -pub mod bls; pub mod defer; -pub mod ecdsa; -pub mod ed25519; pub mod hash; #[cfg(feature = "std")] mod hasher; pub mod offchain; -pub mod paired_crypto; -pub mod sr25519; pub mod testing; #[cfg(feature = "std")] pub mod traits; pub mod uint; +#[cfg(feature = "bandersnatch-experimental")] +pub mod bandersnatch; +#[cfg(feature = "bls-experimental")] +pub mod bls; +pub mod crypto_bytes; +pub mod ecdsa; +pub mod ed25519; +pub mod paired_crypto; +pub mod sr25519; + #[cfg(feature = "bls-experimental")] pub use bls::{bls377, bls381}; #[cfg(feature = "bls-experimental")] diff --git a/substrate/primitives/core/src/paired_crypto.rs b/substrate/primitives/core/src/paired_crypto.rs index 4cbc0e6064a4..99423a3dfcf1 100644 --- a/substrate/primitives/core/src/paired_crypto.rs +++ b/substrate/primitives/core/src/paired_crypto.rs @@ -17,25 +17,16 @@ //! API for using a pair of crypto schemes together. -#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; +use core::marker::PhantomData; + use crate::crypto::{ - ByteArray, CryptoType, Derive, DeriveError, DeriveJunction, Pair as PairT, Public as PublicT, - SecretStringError, UncheckedFrom, + ByteArray, CryptoType, DeriveError, DeriveJunction, Pair as PairT, Public as PublicT, + PublicBytes, SecretStringError, Signature as SignatureT, SignatureBytes, UncheckedFrom, }; #[cfg(not(feature = "std"))] use alloc::vec::Vec; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use alloc::{format, string::String}; -use codec::{Decode, Encode, MaxEncodedLen}; -use scale_info::TypeInfo; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; - -use sp_runtime_interface::pass_by::{self, PassBy, PassByInner}; - /// ECDSA and BLS12-377 paired crypto scheme #[cfg(feature = "bls-experimental")] pub mod ecdsa_bls377 { @@ -54,12 +45,20 @@ pub mod ecdsa_bls377 { const SIGNATURE_LEN: usize = ecdsa::SIGNATURE_SERIALIZED_SIZE + bls377::SIGNATURE_SERIALIZED_SIZE; + #[doc(hidden)] + pub struct EcdsaBls377Tag(ecdsa::EcdsaTag, bls377::Bls377Tag); + + impl super::PairedCryptoSubTagBound for EcdsaBls377Tag {} + /// (ECDSA,BLS12-377) key-pair pair. - pub type Pair = super::Pair; + pub type Pair = + super::Pair; + /// (ECDSA,BLS12-377) public key pair. - pub type Public = super::Public; + pub type Public = super::Public; + /// (ECDSA,BLS12-377) signature pair. - pub type Signature = super::Signature; + pub type Signature = super::Signature; impl super::CryptoType for Public { type Pair = Pair; @@ -75,7 +74,7 @@ pub mod ecdsa_bls377 { #[cfg(feature = "full_crypto")] impl Pair { - /// Hashes the `message` with the specified [`Hasher`] before signing sith the ECDSA secret + /// Hashes the `message` with the specified [`Hasher`] before signing with the ECDSA secret /// component. /// /// The hasher does not affect the BLS12-377 component. 
This generates BLS12-377 Signature @@ -110,7 +109,7 @@ pub mod ecdsa_bls377 { let Ok(left_pub) = public.0[..ecdsa::PUBLIC_KEY_SERIALIZED_SIZE].try_into() else { return false }; - let Ok(left_sig) = sig.0[0..ecdsa::SIGNATURE_SERIALIZED_SIZE].try_into() else { + let Ok(left_sig) = sig.0[..ecdsa::SIGNATURE_SERIALIZED_SIZE].try_into() else { return false }; if !ecdsa::Pair::verify_prehashed(&left_sig, &msg_hash, &left_pub) { @@ -140,276 +139,62 @@ const SECURE_SEED_LEN: usize = 32; /// will need it later (such as for HDKD). type Seed = [u8; SECURE_SEED_LEN]; -/// A public key. -#[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, PartialEq, Eq, PartialOrd, Ord)] -pub struct Public([u8; LEFT_PLUS_RIGHT_LEN]); - -impl core::hash::Hash for Public { - fn hash(&self, state: &mut H) { - self.0.hash(state); - } -} - -impl ByteArray for Public { - const LEN: usize = LEFT_PLUS_RIGHT_LEN; -} - -impl TryFrom<&[u8]> for Public { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() != LEFT_PLUS_RIGHT_LEN { - return Err(()) - } - let mut inner = [0u8; LEFT_PLUS_RIGHT_LEN]; - inner.copy_from_slice(data); - Ok(Public(inner)) - } -} - -impl AsRef<[u8; LEFT_PLUS_RIGHT_LEN]> - for Public -{ - fn as_ref(&self) -> &[u8; LEFT_PLUS_RIGHT_LEN] { - &self.0 - } -} - -impl AsRef<[u8]> for Public { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } -} - -impl AsMut<[u8]> for Public { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } -} - -impl PassByInner for Public { - type Inner = [u8; LEFT_PLUS_RIGHT_LEN]; - - fn into_inner(self) -> Self::Inner { - self.0 - } +#[doc(hidden)] +pub trait PairedCryptoSubTagBound {} +#[doc(hidden)] +pub struct PairedCryptoTag; - fn inner(&self) -> &Self::Inner { - &self.0 - } - - fn from_inner(inner: Self::Inner) -> Self { - Self(inner) - } -} - -impl PassBy for Public { - type PassBy = pass_by::Inner; -} +/// A public key. +pub type Public = + PublicBytes; impl< LeftPair: PairT, RightPair: PairT, const LEFT_PLUS_RIGHT_PUBLIC_LEN: usize, const SIGNATURE_LEN: usize, - > From> - for Public + SubTag: PairedCryptoSubTagBound, + > From> + for Public where - Pair: - PairT>, + Pair: + PairT>, { - fn from(x: Pair) -> Self { + fn from( + x: Pair, + ) -> Self { x.public() } } -impl UncheckedFrom<[u8; LEFT_PLUS_RIGHT_LEN]> - for Public -{ - fn unchecked_from(data: [u8; LEFT_PLUS_RIGHT_LEN]) -> Self { - Public(data) - } -} - -#[cfg(feature = "std")] -impl std::fmt::Display for Public -where - Public: CryptoType, -{ - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } -} - -impl core::fmt::Debug for Public -where - Public: CryptoType, - [u8; LEFT_PLUS_RIGHT_LEN]: crate::hexdisplay::AsBytesRef, -{ - #[cfg(feature = "std")] - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for Public -where - Public: CryptoType, -{ - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de, const LEFT_PLUS_RIGHT_LEN: usize> Deserialize<'de> for Public -where - Public: CryptoType, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Public::from_ss58check(&String::deserialize(deserializer)?) 
- .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl PublicT for Public where - Public: CryptoType -{ -} - -impl Derive for Public {} - -/// Trait characterizing a signature which could be used as individual component of an -/// `paired_crypto:Signature` pair. -pub trait SignatureBound: ByteArray {} - -impl SignatureBound for T {} - /// A pair of signatures of different types -#[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, PartialEq, Eq)] -pub struct Signature([u8; LEFT_PLUS_RIGHT_LEN]); - -impl core::hash::Hash for Signature { - fn hash(&self, state: &mut H) { - self.0.hash(state); - } -} - -impl ByteArray for Signature { - const LEN: usize = LEFT_PLUS_RIGHT_LEN; -} - -impl TryFrom<&[u8]> for Signature { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() != LEFT_PLUS_RIGHT_LEN { - return Err(()) - } - let mut inner = [0u8; LEFT_PLUS_RIGHT_LEN]; - inner.copy_from_slice(data); - Ok(Signature(inner)) - } -} - -impl AsMut<[u8]> for Signature { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } -} - -impl AsRef<[u8; LEFT_PLUS_RIGHT_LEN]> - for Signature -{ - fn as_ref(&self) -> &[u8; LEFT_PLUS_RIGHT_LEN] { - &self.0 - } -} - -impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } -} - -#[cfg(feature = "serde")] -impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de, const LEFT_PLUS_RIGHT_LEN: usize> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let bytes = array_bytes::hex2bytes(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::::try_from(bytes.as_ref()).map_err(|e| { - de::Error::custom(format!("Error converting deserialized data into signature: {:?}", e)) - }) - } -} - -impl From> - for [u8; LEFT_PLUS_RIGHT_LEN] -{ - fn from(signature: Signature) -> [u8; LEFT_PLUS_RIGHT_LEN] { - signature.0 - } -} - -impl core::fmt::Debug for Signature -where - [u8; LEFT_PLUS_RIGHT_LEN]: crate::hexdisplay::AsBytesRef, -{ - #[cfg(feature = "std")] - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { - Ok(()) - } -} - -impl UncheckedFrom<[u8; LEFT_PLUS_RIGHT_LEN]> - for Signature -{ - fn unchecked_from(data: [u8; LEFT_PLUS_RIGHT_LEN]) -> Self { - Signature(data) - } -} +pub type Signature = + SignatureBytes; /// A key pair. 
-#[derive(Clone)] pub struct Pair< LeftPair: PairT, RightPair: PairT, const PUBLIC_KEY_LEN: usize, const SIGNATURE_LEN: usize, + SubTag, > { left: LeftPair, right: RightPair, + _phantom: PhantomData SubTag>, +} + +impl< + LeftPair: PairT + Clone, + RightPair: PairT + Clone, + const PUBLIC_KEY_LEN: usize, + const SIGNATURE_LEN: usize, + SubTag, + > Clone for Pair +{ + fn clone(&self) -> Self { + Self { left: self.left.clone(), right: self.right.clone(), _phantom: PhantomData } + } } impl< @@ -417,18 +202,18 @@ impl< RightPair: PairT, const PUBLIC_KEY_LEN: usize, const SIGNATURE_LEN: usize, - > PairT for Pair + SubTag: PairedCryptoSubTagBound, + > PairT for Pair where - Pair: CryptoType, - LeftPair::Signature: SignatureBound, - RightPair::Signature: SignatureBound, - Public: CryptoType, + Pair: CryptoType, + Public: PublicT, + Signature: SignatureT, LeftPair::Seed: From + Into, RightPair::Seed: From + Into, { type Seed = Seed; - type Public = Public; - type Signature = Signature; + type Public = Public; + type Signature = Signature; fn from_seed_slice(seed_slice: &[u8]) -> Result { if seed_slice.len() != SECURE_SEED_LEN { @@ -436,7 +221,7 @@ where } let left = LeftPair::from_seed_slice(&seed_slice)?; let right = RightPair::from_seed_slice(&seed_slice)?; - Ok(Pair { left, right }) + Ok(Pair { left, right, _phantom: PhantomData }) } /// Derive a child key from a series of given junctions. @@ -459,7 +244,7 @@ where _ => None, }; - Ok((Self { left: left.0, right: right.0 }, seed)) + Ok((Self { left: left.0, right: right.0, _phantom: PhantomData }, seed)) } fn public(&self) -> Self::Public { @@ -479,16 +264,18 @@ where Self::Signature::unchecked_from(raw) } - fn verify>(sig: &Self::Signature, message: M, public: &Self::Public) -> bool { + fn verify>( + sig: &Self::Signature, + message: Msg, + public: &Self::Public, + ) -> bool { let Ok(left_pub) = public.0[..LeftPair::Public::LEN].try_into() else { return false }; let Ok(left_sig) = sig.0[0..LeftPair::Signature::LEN].try_into() else { return false }; if !LeftPair::verify(&left_sig, message.as_ref(), &left_pub) { return false } - let Ok(right_pub) = public.0[LeftPair::Public::LEN..PUBLIC_KEY_LEN].try_into() else { - return false - }; + let Ok(right_pub) = public.0[LeftPair::Public::LEN..].try_into() else { return false }; let Ok(right_sig) = sig.0[LeftPair::Signature::LEN..].try_into() else { return false }; RightPair::verify(&right_sig, message.as_ref(), &right_pub) } @@ -503,13 +290,14 @@ where // Test set exercising the (ECDSA,BLS12-377) implementation #[cfg(all(test, feature = "bls-experimental"))] -mod test { +mod tests { use super::*; - use crate::{crypto::DEV_PHRASE, KeccakHasher}; + #[cfg(feature = "serde")] + use crate::crypto::Ss58Codec; + use crate::{bls377, crypto::DEV_PHRASE, ecdsa, KeccakHasher}; + use codec::{Decode, Encode}; use ecdsa_bls377::{Pair, Signature}; - use crate::{bls377, ecdsa}; - #[test] fn test_length_of_paired_ecdsa_and_bls377_public_key_and_signature_is_correct() { assert_eq!( diff --git a/substrate/primitives/core/src/sr25519.rs b/substrate/primitives/core/src/sr25519.rs index 386c04026f7f..64dd199ef3de 100644 --- a/substrate/primitives/core/src/sr25519.rs +++ b/substrate/primitives/core/src/sr25519.rs @@ -19,9 +19,10 @@ //! //! Note: `CHAIN_CODE_LENGTH` must be equal to `crate::crypto::JUNCTION_ID_LEN` //! for this to work. 
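The paired scheme's `verify` above works purely on concatenated byte ranges: the first `LeftPair::Public::LEN` (respectively `LeftPair::Signature::LEN`) bytes go to the left scheme and the rest to the right one. A small sketch of that split, using the ECDSA lengths from this diff (33-byte public key, 65-byte signature) and a hypothetical right-component length for illustration:

```rust
// ECDSA component lengths, as defined in this diff.
const ECDSA_PUBLIC_LEN: usize = 33; // ecdsa::PUBLIC_KEY_SERIALIZED_SIZE
const ECDSA_SIGNATURE_LEN: usize = 65; // ecdsa::SIGNATURE_SERIALIZED_SIZE

// Split concatenated paired-crypto bytes into (left, right) components,
// failing gracefully like the `let ... else { return false }` guards above.
fn split(bytes: &[u8], left_len: usize) -> Option<(&[u8], &[u8])> {
    (bytes.len() >= left_len).then(|| bytes.split_at(left_len))
}

fn main() {
    // Hypothetical right-component length, purely for illustration.
    const RIGHT_LEN: usize = 100;
    let public = [0u8; ECDSA_PUBLIC_LEN + RIGHT_LEN];
    let (left, right) = split(&public, ECDSA_PUBLIC_LEN).expect("long enough");
    assert_eq!((left.len(), right.len()), (ECDSA_PUBLIC_LEN, RIGHT_LEN));

    // A too-short buffer is rejected rather than panicking, so `verify`
    // simply returns `false` on malformed input.
    assert!(split(&[0u8; 10], ECDSA_SIGNATURE_LEN).is_none());
}
```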
+ #[cfg(feature = "serde")] use crate::crypto::Ss58Codec; -use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError}; +use crate::crypto::{CryptoBytes, DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError}; use alloc::vec::Vec; #[cfg(feature = "full_crypto")] use schnorrkel::signing_context; @@ -30,13 +31,7 @@ use schnorrkel::{ ExpansionMode, Keypair, MiniSecretKey, PublicKey, SecretKey, }; -use crate::{ - crypto::{ - ByteArray, CryptoType, CryptoTypeId, Derive, FromEntropy, Public as TraitPublic, - UncheckedFrom, - }, - hash::{H256, H512}, -}; +use crate::crypto::{CryptoType, CryptoTypeId, Derive, Public as TraitPublic, SignatureBytes}; use codec::{Decode, Encode, MaxEncodedLen}; use core::ops::Deref; use scale_info::TypeInfo; @@ -46,6 +41,7 @@ use alloc::{format, string::String}; use schnorrkel::keys::{MINI_SECRET_KEY_LENGTH, SECRET_KEY_LENGTH}; #[cfg(feature = "serde")] use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(feature = "std")] use sp_runtime_interface::pass_by::PassByInner; // signing context @@ -54,79 +50,36 @@ const SIGNING_CTX: &[u8] = b"substrate"; /// An identifier used to match public keys against sr25519 keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"sr25"); -/// An Schnorrkel/Ristretto x25519 ("sr25519") public key. -#[derive( - PartialEq, - Eq, - PartialOrd, - Ord, - Clone, - Copy, - Encode, - Decode, - PassByInner, - MaxEncodedLen, - TypeInfo, - Hash, -)] -pub struct Public(pub [u8; 32]); - -/// An Schnorrkel/Ristretto x25519 ("sr25519") key pair. -pub struct Pair(Keypair); - -impl Clone for Pair { - fn clone(&self) -> Self { - Pair(schnorrkel::Keypair { - public: self.0.public, - secret: schnorrkel::SecretKey::from_bytes(&self.0.secret.to_bytes()[..]) - .expect("key is always the correct size; qed"), - }) - } -} - -impl FromEntropy for Public { - fn from_entropy(input: &mut impl codec::Input) -> Result { - let mut result = Self([0u8; 32]); - input.read(&mut result.0[..])?; - Ok(result) - } -} - -impl AsRef<[u8; 32]> for Public { - fn as_ref(&self) -> &[u8; 32] { - &self.0 - } -} - -impl AsRef<[u8]> for Public { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } -} +/// The byte length of public key +pub const PUBLIC_KEY_SERIALIZED_SIZE: usize = 32; -impl AsMut<[u8]> for Public { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } -} +/// The byte length of signature +pub const SIGNATURE_SERIALIZED_SIZE: usize = 64; -impl Deref for Public { - type Target = [u8]; +#[doc(hidden)] +pub struct Sr25519Tag; +#[doc(hidden)] +pub struct Sr25519PublicTag; - fn deref(&self) -> &Self::Target { - &self.0 - } -} +/// An Schnorrkel/Ristretto x25519 ("sr25519") public key. +pub type Public = CryptoBytes; -impl From for [u8; 32] { - fn from(x: Public) -> [u8; 32] { - x.0 - } -} +impl TraitPublic for Public {} -impl From for H256 { - fn from(x: Public) -> H256 { - x.0.into() +impl Derive for Public { + /// Derive a child key from a series of given junctions. + /// + /// `None` if there are any hard junctions in there. 
+ #[cfg(feature = "serde")] + fn derive>(&self, path: Iter) -> Option { + let mut acc = PublicKey::from_bytes(self.as_ref()).ok()?; + for j in path { + match j { + DeriveJunction::Soft(cc) => acc = acc.derived_key_simple(ChainCode(cc), &[]).0, + DeriveJunction::Hard(_cc) => return None, + } + } + Some(Self::from(acc.to_bytes())) } } @@ -139,31 +92,6 @@ impl std::str::FromStr for Public { } } -impl TryFrom<&[u8]> for Public { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() != Self::LEN { - return Err(()) - } - let mut r = [0u8; 32]; - r.copy_from_slice(data); - Ok(Self::unchecked_from(r)) - } -} - -impl UncheckedFrom<[u8; 32]> for Public { - fn unchecked_from(x: [u8; 32]) -> Self { - Public::from_raw(x) - } -} - -impl UncheckedFrom for Public { - fn unchecked_from(x: H256) -> Self { - Public::from_h256(x) - } -} - #[cfg(feature = "std")] impl std::fmt::Display for Public { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { @@ -175,7 +103,7 @@ impl core::fmt::Debug for Public { #[cfg(feature = "std")] fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) + write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(self.inner()), &s[0..8]) } #[cfg(not(feature = "std"))] @@ -206,188 +134,28 @@ impl<'de> Deserialize<'de> for Public { } /// An Schnorrkel/Ristretto x25519 ("sr25519") signature. -#[derive(Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq, Hash)] -pub struct Signature(pub [u8; 64]); - -impl TryFrom<&[u8]> for Signature { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() == 64 { - let mut inner = [0u8; 64]; - inner.copy_from_slice(data); - Ok(Signature(inner)) - } else { - Err(()) - } - } -} - -#[cfg(feature = "serde")] -impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl Clone for Signature { - fn clone(&self) -> Self { - let mut r = [0u8; 64]; - r.copy_from_slice(&self.0[..]); - Signature(r) - } -} - -impl From for [u8; 64] { - fn from(v: Signature) -> [u8; 64] { - v.0 - } -} - -impl From for H512 { - fn from(v: Signature) -> H512 { - H512::from(v.0) - } -} - -impl AsRef<[u8; 64]> for Signature { - fn as_ref(&self) -> &[u8; 64] { - &self.0 - } -} - -impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } -} - -impl AsMut<[u8]> for Signature { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] 
- } -} +pub type Signature = SignatureBytes; #[cfg(feature = "full_crypto")] impl From for Signature { fn from(s: schnorrkel::Signature) -> Signature { - Signature(s.to_bytes()) + Signature::from(s.to_bytes()) } } -impl core::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { - Ok(()) - } -} - -impl UncheckedFrom<[u8; 64]> for Signature { - fn unchecked_from(data: [u8; 64]) -> Signature { - Signature(data) - } -} - -impl Signature { - /// A new instance from the given 64-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use - /// it if you are certain that the array actually is a signature, or if you - /// immediately verify the signature. All functions that verify signatures - /// will fail if the `Signature` is not actually a valid signature. - pub fn from_raw(data: [u8; 64]) -> Signature { - Signature(data) - } - - /// A new instance from the given slice that should be 64 bytes long. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_slice(data: &[u8]) -> Option { - if data.len() != 64 { - return None - } - let mut r = [0u8; 64]; - r.copy_from_slice(data); - Some(Signature(r)) - } - - /// A new instance from an H512. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_h512(v: H512) -> Signature { - Signature(v.into()) - } -} - -impl Derive for Public { - /// Derive a child key from a series of given junctions. - /// - /// `None` if there are any hard junctions in there. - #[cfg(feature = "serde")] - fn derive>(&self, path: Iter) -> Option { - let mut acc = PublicKey::from_bytes(self.as_ref()).ok()?; - for j in path { - match j { - DeriveJunction::Soft(cc) => acc = acc.derived_key_simple(ChainCode(cc), &[]).0, - DeriveJunction::Hard(_cc) => return None, - } - } - Some(Self(acc.to_bytes())) - } -} - -impl Public { - /// A new instance from the given 32-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_raw(data: [u8; 32]) -> Self { - Public(data) - } - - /// A new instance from an H256. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_h256(x: H256) -> Self { - Public(x.into()) - } +/// An Schnorrkel/Ristretto x25519 ("sr25519") key pair. +pub struct Pair(Keypair); - /// Return a slice filled with raw data. - pub fn as_array_ref(&self) -> &[u8; 32] { - self.as_ref() +impl Clone for Pair { + fn clone(&self) -> Self { + Pair(schnorrkel::Keypair { + public: self.0.public, + secret: schnorrkel::SecretKey::from_bytes(&self.0.secret.to_bytes()[..]) + .expect("key is always the correct size; qed"), + }) } } -impl ByteArray for Public { - const LEN: usize = 32; -} - -impl TraitPublic for Public {} - #[cfg(feature = "std")] impl From for Pair { fn from(sec: MiniSecretKey) -> Pair { @@ -438,9 +206,7 @@ impl TraitPair for Pair { /// Get the public key. 
 	fn public(&self) -> Public {
-		let mut pk = [0u8; 32];
-		pk.copy_from_slice(&self.0.public.to_bytes());
-		Public(pk)
+		Public::from(self.0.public.to_bytes())
 	}
 
 	/// Make a new key pair from raw secret seed material.
@@ -720,7 +486,7 @@ pub mod vrf {
 	impl VrfPublic for Public {
 		fn vrf_verify(&self, data: &Self::VrfSignData, signature: &Self::VrfSignature) -> bool {
 			let do_verify = || {
-				let public = schnorrkel::PublicKey::from_bytes(self)?;
+				let public = schnorrkel::PublicKey::from_bytes(&self.0)?;
 				let inout =
 					signature.pre_output.0.attach_input_hash(&public, data.transcript.0.clone())?;
@@ -820,7 +586,10 @@ pub mod vrf {
 #[cfg(test)]
 mod tests {
 	use super::{vrf::*, *};
-	use crate::crypto::{Ss58Codec, VrfPublic, VrfSecret, DEV_ADDRESS, DEV_PHRASE};
+	use crate::{
+		crypto::{Ss58Codec, VrfPublic, VrfSecret, DEV_ADDRESS, DEV_PHRASE},
+		ByteArray as _,
+	};
 	use serde_json;
 
 	#[test]
@@ -984,10 +753,10 @@ mod tests {
 		let (pair, _) = Pair::generate();
 		let public = pair.public();
 		let message = b"Signed payload";
-		let Signature(mut bytes) = pair.sign(&message[..]);
+		let mut signature = pair.sign(&message[..]);
+		let bytes = &mut signature.0;
 		bytes[0] = !bytes[0];
 		bytes[2] = !bytes[2];
-		let signature = Signature(bytes);
 		assert!(!Pair::verify(&signature, &message[..], &public));
 	}
 
diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml
index 6463c423fe7b..c08ac459de53 100644
--- a/substrate/primitives/inherents/Cargo.toml
+++ b/substrate/primitives/inherents/Cargo.toml
@@ -17,15 +17,15 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-async-trait = { version = "0.1.74", optional = true }
+async-trait = { version = "0.1.79", optional = true }
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 impl-trait-for-tuples = "0.2.2"
 thiserror = { optional = true, workspace = true }
 sp-runtime = { path = "../runtime", default-features = false, optional = true }
 
 [dev-dependencies]
-futures = "0.3.21"
+futures = "0.3.30"
 
 [features]
 default = ["std"]
diff --git a/substrate/primitives/inherents/src/client_side.rs b/substrate/primitives/inherents/src/client_side.rs
index 27479de136f2..3c299dfa4eea 100644
--- a/substrate/primitives/inherents/src/client_side.rs
+++ b/substrate/primitives/inherents/src/client_side.rs
@@ -23,7 +23,7 @@ use sp_runtime::traits::Block as BlockT;
 /// It is possible for the caller to provide custom arguments to the callee by setting the
 /// `ExtraArgs` generic parameter.
 ///
-/// The crate already provides some convience implementations of this trait for
+/// The crate already provides some convenience implementations of this trait for
 /// `Box` and closures. So, it should not be required to implement
 /// this trait manually.
 #[async_trait::async_trait]
diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml
index 3e422b91cf25..e8da730bfae2 100644
--- a/substrate/primitives/io/Cargo.toml
+++ b/substrate/primitives/io/Cargo.toml
@@ -83,7 +83,7 @@ disable_allocator = []
 
 # This gives the caller direct programmatic access to the error message.
# # When disabled the error message will only be printed out in the -# logs, with the caller receving a generic "wasm `unreachable` instruction executed" +# logs, with the caller receiving a generic "wasm `unreachable` instruction executed" # error message. # # This has no effect if both `disable_panic_handler` and `disable_oom` diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index 49a415ae7d2c..b86f5680bdb3 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -1082,7 +1082,7 @@ pub trait Crypto { /// Register a `ecdsa` signature for batch verification. /// /// Batch verification must be enabled by calling [`start_batch_verify`]. - /// If batch verification is not enabled, the signature will be verified immediatley. + /// If batch verification is not enabled, the signature will be verified immediately. /// To get the result of the batch verification, [`finish_batch_verify`] /// needs to be called. /// @@ -1697,9 +1697,9 @@ mod tracing_setup { /// The PassingTracingSubscriber implements `tracing_core::Subscriber` /// and pushes the information across the runtime interface to the host - struct PassingTracingSubsciber; + struct PassingTracingSubscriber; - impl tracing_core::Subscriber for PassingTracingSubsciber { + impl tracing_core::Subscriber for PassingTracingSubscriber { fn enabled(&self, metadata: &Metadata<'_>) -> bool { wasm_tracing::enabled(Crossing(metadata.into())) } @@ -1732,7 +1732,7 @@ mod tracing_setup { /// set the global bridging subscriber once. pub fn init_tracing() { if TRACING_SET.load(Ordering::Relaxed) == false { - set_global_default(Dispatch::new(PassingTracingSubsciber {})) + set_global_default(Dispatch::new(PassingTracingSubscriber {})) .expect("We only ever call this once"); TRACING_SET.store(true, Ordering::Relaxed); } diff --git a/substrate/primitives/keyring/Cargo.toml b/substrate/primitives/keyring/Cargo.toml index 940fe90916d2..7471e9cf8ffa 100644 --- a/substrate/primitives/keyring/Cargo.toml +++ b/substrate/primitives/keyring/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -strum = { version = "0.24.1", features = ["derive"], default-features = false } +strum = { version = "0.26.2", features = ["derive"], default-features = false } sp-core = { path = "../core", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/keystore/src/testing.rs b/substrate/primitives/keystore/src/testing.rs index e10660b126a3..d8610ecfa5b6 100644 --- a/substrate/primitives/keystore/src/testing.rs +++ b/substrate/primitives/keystore/src/testing.rs @@ -528,7 +528,7 @@ mod tests { assert!(res.is_some()); // does not verify with default out-of-the-box verification - assert!(!ecdsa_bls377::Pair::verify(&res.clone().unwrap(), &msg[..], &pair.public())); + assert!(!ecdsa_bls377::Pair::verify(&res.unwrap(), &msg[..], &pair.public())); // should verify using keccak256 as hasher assert!(ecdsa_bls377::Pair::verify_with_hasher::( diff --git a/substrate/primitives/maybe-compressed-blob/Cargo.toml b/substrate/primitives/maybe-compressed-blob/Cargo.toml index fa5383d03b10..178c915ce837 100644 --- a/substrate/primitives/maybe-compressed-blob/Cargo.toml +++ b/substrate/primitives/maybe-compressed-blob/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" repository.workspace = true -description = "Handling of blobs, usually Wasm code, which may be 
compresed" +description = "Handling of blobs, usually Wasm code, which may be compressed" documentation = "https://docs.rs/sp-maybe-compressed-blob" readme = "README.md" diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml index 9c07f699b37a..891f893a0c96 100644 --- a/substrate/primitives/merkle-mountain-range/Cargo.toml +++ b/substrate/primitives/merkle-mountain-range/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } serde = { features = ["alloc", "derive"], optional = true, workspace = true } diff --git a/substrate/primitives/metadata-ir/Cargo.toml b/substrate/primitives/metadata-ir/Cargo.toml index 31c839b5c485..ca8408d0ad97 100644 --- a/substrate/primitives/metadata-ir/Cargo.toml +++ b/substrate/primitives/metadata-ir/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } [features] default = ["std"] diff --git a/substrate/primitives/metadata-ir/src/types.rs b/substrate/primitives/metadata-ir/src/types.rs index 1f11cbe47ca0..fb163f0a1d13 100644 --- a/substrate/primitives/metadata-ir/src/types.rs +++ b/substrate/primitives/metadata-ir/src/types.rs @@ -161,7 +161,7 @@ pub struct ExtrinsicMetadataIR { pub ty: T::Type, /// Extrinsic version. pub version: u8, - /// The type of the address that signes the extrinsic + /// The type of the address that signs the extrinsic pub address_ty: T::Type, /// The type of the outermost Call enum. 
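Back in the sp-io hunk above, the renamed `PassingTracingSubscriber` is installed by `init_tracing` behind an atomic flag. A minimal sketch of that once-only initialization pattern (names here are illustrative stand-ins, not the sp-io items):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

static TRACING_SET: AtomicBool = AtomicBool::new(false);

fn set_global_default() {
    // Stand-in for installing the real tracing dispatcher.
    println!("subscriber installed");
}

fn init_tracing() {
    if !TRACING_SET.load(Ordering::Relaxed) {
        set_global_default();
        TRACING_SET.store(true, Ordering::Relaxed);
    }
}

fn main() {
    init_tracing();
    init_tracing(); // no-op: the flag is already set
}
```

The load/store pair is racy in principle; a `compare_exchange` would close that window, and the original gets away with it because the real `set_global_default` rejects a second call anyway.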
pub call_ty: T::Type, diff --git a/substrate/primitives/mixnet/Cargo.toml b/substrate/primitives/mixnet/Cargo.toml index 8ba7f36da43c..07840ca63cb2 100644 --- a/substrate/primitives/mixnet/Cargo.toml +++ b/substrate/primitives/mixnet/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { default-features = false, path = "../api" } sp-application-crypto = { default-features = false, path = "../application-crypto" } diff --git a/substrate/primitives/npos-elections/Cargo.toml b/substrate/primitives/npos-elections/Cargo.toml index b0b9890c0619..afa59af64d6d 100644 --- a/substrate/primitives/npos-elections/Cargo.toml +++ b/substrate/primitives/npos-elections/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-arithmetic = { path = "../arithmetic", default-features = false } sp-core = { path = "../core", default-features = false } diff --git a/substrate/primitives/npos-elections/src/pjr.rs b/substrate/primitives/npos-elections/src/pjr.rs index b42191dcd101..01569c975214 100644 --- a/substrate/primitives/npos-elections/src/pjr.rs +++ b/substrate/primitives/npos-elections/src/pjr.rs @@ -263,7 +263,7 @@ fn prepare_pjr_input( } } - // Convert Suppports into a SupportMap + // Convert Supports into a SupportMap // // As a flat list, we're limited to linear search. That gives the production of `candidates`, // below, a complexity of `O(s*c)`, where `s == supports.len()` and `c == all_candidates.len()`. diff --git a/substrate/primitives/npos-elections/src/reduce.rs b/substrate/primitives/npos-elections/src/reduce.rs index 90b052a1c8ca..047994d00327 100644 --- a/substrate/primitives/npos-elections/src/reduce.rs +++ b/substrate/primitives/npos-elections/src/reduce.rs @@ -394,7 +394,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 // voter_root_path.last().unwrap()); TODO: @kian // the common path must be non-void.. debug_assert!(common_count > 0); - // and smaller than btoh + // and smaller than both debug_assert!(common_count <= voter_root_path.len()); debug_assert!(common_count <= target_root_path.len()); diff --git a/substrate/primitives/runtime-interface/src/lib.rs b/substrate/primitives/runtime-interface/src/lib.rs index aa473c7b9a4b..622891f15c01 100644 --- a/substrate/primitives/runtime-interface/src/lib.rs +++ b/substrate/primitives/runtime-interface/src/lib.rs @@ -282,7 +282,7 @@ pub use sp_tracing; /// /// `key` holds the pointer and the length to the `data` slice. /// pub fn call(data: &[u8]) -> Vec { /// extern "C" { pub fn ext_call_version_2(key: u64); } -/// // Should call into extenal `ext_call_version_2(<[u8] as IntoFFIValue>::into_ffi_value(key))` +/// // Should call into external `ext_call_version_2(<[u8] as IntoFFIValue>::into_ffi_value(key))` /// // But this is too much to replicate in a doc test so here we just return a dummy vector. 
/// // Note that we jump into the latest version not marked as `register_only` (i.e. version 2). /// Vec::new() diff --git a/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs b/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs index 2f42e60504eb..871a4922ce3a 100644 --- a/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs +++ b/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs @@ -45,19 +45,19 @@ pub fn import_sp_io() { #[runtime_interface] pub trait TestApi { - fn test_versionning(&self, _data: u32) -> bool { + fn test_versioning(&self, _data: u32) -> bool { // should not be called unimplemented!() } } wasm_export_functions! { - fn test_versionning_works() { + fn test_versioning_works() { // old api allows only 42 and 50 - assert!(test_api::test_versionning(42)); - assert!(test_api::test_versionning(50)); + assert!(test_api::test_versioning(42)); + assert!(test_api::test_versioning(50)); - assert!(!test_api::test_versionning(142)); - assert!(!test_api::test_versionning(0)); + assert!(!test_api::test_versioning(142)); + assert!(!test_api::test_versioning(0)); } } diff --git a/substrate/primitives/runtime-interface/test-wasm/src/lib.rs b/substrate/primitives/runtime-interface/test-wasm/src/lib.rs index 64d77ad69cf0..c838ccfeae70 100644 --- a/substrate/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/substrate/primitives/runtime-interface/test-wasm/src/lib.rs @@ -129,21 +129,21 @@ pub trait TestApi { val } - fn test_versionning(&self, data: u32) -> bool { + fn test_versioning(&self, data: u32) -> bool { data == 42 || data == 50 } #[version(2)] - fn test_versionning(&self, data: u32) -> bool { + fn test_versioning(&self, data: u32) -> bool { data == 42 } - fn test_versionning_register_only(&self, data: u32) -> bool { + fn test_versioning_register_only(&self, data: u32) -> bool { data == 80 } #[version(2, register_only)] - fn test_versionning_register_only(&self, data: u32) -> bool { + fn test_versioning_register_only(&self, data: u32) -> bool { data == 42 } @@ -285,21 +285,21 @@ wasm_export_functions! { assert_eq!(0, len); } - fn test_versionning_works() { + fn test_versioning_works() { // we fix new api to accept only 42 as a proper input // as opposed to sp-runtime-interface-test-wasm-deprecated::test_api::verify_input // which accepted 42 and 50. - assert!(test_api::test_versionning(42)); + assert!(test_api::test_versioning(42)); - assert!(!test_api::test_versionning(50)); - assert!(!test_api::test_versionning(102)); + assert!(!test_api::test_versioning(50)); + assert!(!test_api::test_versioning(102)); } - fn test_versionning_register_only_works() { + fn test_versioning_register_only_works() { // Ensure that we will import the version of the runtime interface function that // isn't tagged with `register_only`. 
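The renames above all revolve around the runtime-interface versioning rules, which are easy to misread: freshly built wasm imports the newest version not marked `register_only`, while the host keeps older versions registered so runtimes built against them keep working. A hand-rolled sketch of that dispatch, with hypothetical function names and no macro:

```rust
// Version 1: the behaviour old runtimes were built against.
fn test_versioning_v1(data: u32) -> bool {
    data == 42 || data == 50
}

// Version 2: stricter; what new runtimes link against.
fn test_versioning_v2(data: u32) -> bool {
    data == 42
}

// What a freshly built runtime resolves `test_versioning` to
// (the latest version that is importable, i.e. not register_only).
fn test_versioning(data: u32) -> bool {
    test_versioning_v2(data)
}

fn main() {
    assert!(test_versioning(42));
    assert!(!test_versioning(50)); // accepted by v1, rejected by v2
}
```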
- assert!(!test_api::test_versionning_register_only(42)); - assert!(test_api::test_versionning_register_only(80)); + assert!(!test_api::test_versioning_register_only(42)); + assert!(test_api::test_versioning_register_only(80)); } fn test_return_input_as_tuple() { diff --git a/substrate/primitives/runtime-interface/test/src/lib.rs b/substrate/primitives/runtime-interface/test/src/lib.rs index 215704a11215..05a955fbe3f8 100644 --- a/substrate/primitives/runtime-interface/test/src/lib.rs +++ b/substrate/primitives/runtime-interface/test/src/lib.rs @@ -163,18 +163,18 @@ fn test_array_return_value_memory_is_freed() { } #[test] -fn test_versionining_with_new_host_works() { +fn test_versioning_with_new_host_works() { // We call to the new wasm binary with new host function. - call_wasm_method::(wasm_binary_unwrap(), "test_versionning_works"); + call_wasm_method::(wasm_binary_unwrap(), "test_versioning_works"); // we call to the old wasm binary with a new host functions // old versions of host functions should be called and test should be ok! - call_wasm_method::(wasm_binary_deprecated_unwrap(), "test_versionning_works"); + call_wasm_method::(wasm_binary_deprecated_unwrap(), "test_versioning_works"); } #[test] -fn test_versionining_register_only() { - call_wasm_method::(wasm_binary_unwrap(), "test_versionning_register_only_works"); +fn test_versioning_register_only() { + call_wasm_method::(wasm_binary_unwrap(), "test_versioning_register_only_works"); } fn run_test_in_another_process( diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index 50b0be7aa8b1..cfbe15d31a1c 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -24,14 +24,14 @@ impl-trait-for-tuples = "0.2.2" log = { workspace = true } paste = "1.0" rand = { version = "0.8.5", optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-application-crypto = { path = "../application-crypto", default-features = false } sp-arithmetic = { path = "../arithmetic", default-features = false } sp-core = { path = "../core", default-features = false } sp-io = { path = "../io", default-features = false } sp-weights = { path = "../weights", default-features = false } -docify = { version = "0.2.7" } +docify = "0.2.8" simple-mermaid = { version = "0.1.1", optional = true } diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 6fa0a643f353..43614a5d4a63 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -42,7 +42,7 @@ use sp_io::hashing::blake2_256; /// the decoding fails. const EXTRINSIC_FORMAT_VERSION: u8 = 4; -/// The `SingaturePayload` of `UncheckedExtrinsic`. +/// The `SignaturePayload` of `UncheckedExtrinsic`. type UncheckedSignaturePayload = (Address, Signature, Extra); /// An extrinsic right from the external world. This is unchecked and so can contain a signature. 
diff --git a/substrate/primitives/runtime/src/multiaddress.rs b/substrate/primitives/runtime/src/multiaddress.rs index d479558f4477..1f440599eb4a 100644 --- a/substrate/primitives/runtime/src/multiaddress.rs +++ b/substrate/primitives/runtime/src/multiaddress.rs @@ -33,7 +33,7 @@ pub enum MultiAddress { Raw(Vec), /// It's a 32 byte representation. Address32([u8; 32]), - /// Its a 20 byte representation. + /// It's a 20 byte representation. Address20([u8; 20]), } diff --git a/substrate/primitives/runtime/src/offchain/storage_lock.rs b/substrate/primitives/runtime/src/offchain/storage_lock.rs index 56d0eeae527c..cfdaa954fe5e 100644 --- a/substrate/primitives/runtime/src/offchain/storage_lock.rs +++ b/substrate/primitives/runtime/src/offchain/storage_lock.rs @@ -556,7 +556,7 @@ mod tests { let res = lock.try_lock(); assert_eq!(res.is_ok(), false); - // sleep again untill sleep_until > deadline + // sleep again until sleep_until > deadline offchain::sleep_until(offchain::timestamp().add(Duration::from_millis(200))); // the lock has expired, failed to extend it diff --git a/substrate/primitives/runtime/src/runtime_string.rs b/substrate/primitives/runtime/src/runtime_string.rs index d91becfc8e4c..c054860993fd 100644 --- a/substrate/primitives/runtime/src/runtime_string.rs +++ b/substrate/primitives/runtime/src/runtime_string.rs @@ -62,6 +62,19 @@ impl From<&'static str> for RuntimeString { } } +impl<'a> TryFrom<&'a RuntimeString> for &'a str { + type Error = core::str::Utf8Error; + fn try_from(from: &'a RuntimeString) -> core::result::Result<&'a str, Self::Error> { + match from { + #[cfg(feature = "std")] + RuntimeString::Owned(string) => Ok(string.as_str()), + #[cfg(not(feature = "std"))] + RuntimeString::Owned(vec) => core::str::from_utf8(&vec), + RuntimeString::Borrowed(str) => Ok(str), + } + } +} + #[cfg(feature = "std")] impl From for String { fn from(string: RuntimeString) -> Self { diff --git a/substrate/primitives/runtime/src/testing.rs b/substrate/primitives/runtime/src/testing.rs index 5f94c834a8f2..b4aeda5a0e7a 100644 --- a/substrate/primitives/runtime/src/testing.rs +++ b/substrate/primitives/runtime/src/testing.rs @@ -284,9 +284,9 @@ where } /// The signature payload of a `TestXt`. -type TxSingaturePayload = (u64, Extra); +type TxSignaturePayload = (u64, Extra); -impl SignaturePayload for TxSingaturePayload { +impl SignaturePayload for TxSignaturePayload { type SignatureAddress = u64; type Signature = (); type SignatureExtra = Extra; @@ -299,7 +299,7 @@ impl SignaturePayload for TxSingaturePayload { #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] pub struct TestXt { /// Signature of the extrinsic. - pub signature: Option>, + pub signature: Option>, /// Call of the extrinsic. 
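The new `TryFrom` impl above gives callers a fallible `&str` view of a `RuntimeString`. A usage sketch against a simplified, std-only stand-in for the real cfg-gated type (which stores `String` under `std` and `Vec<u8>` otherwise):

```rust
use core::str::Utf8Error;

enum RuntimeString {
    Borrowed(&'static str),
    Owned(Vec<u8>), // std builds use String; Vec<u8> keeps this sketch no_std-shaped
}

impl<'a> TryFrom<&'a RuntimeString> for &'a str {
    type Error = Utf8Error;
    fn try_from(from: &'a RuntimeString) -> Result<&'a str, Self::Error> {
        match from {
            RuntimeString::Owned(vec) => core::str::from_utf8(vec),
            RuntimeString::Borrowed(s) => Ok(s),
        }
    }
}

fn main() {
    let owned = RuntimeString::Owned(b"spec".to_vec());
    let borrowed = RuntimeString::Borrowed("spec");
    let a: &str = (&owned).try_into().expect("valid UTF-8");
    let b: &str = (&borrowed).try_into().expect("infallible for Borrowed");
    assert_eq!(a, b);
}
```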
pub call: Call, } @@ -348,7 +348,7 @@ impl traits::Extrinsic for TestXt { type Call = Call; - type SignaturePayload = TxSingaturePayload; + type SignaturePayload = TxSignaturePayload; fn is_signed(&self) -> Option { Some(self.signature.is_some()) diff --git a/substrate/primitives/runtime/src/traits.rs b/substrate/primitives/runtime/src/traits.rs index 7860e7f08af8..7a4f7b6b389e 100644 --- a/substrate/primitives/runtime/src/traits.rs +++ b/substrate/primitives/runtime/src/traits.rs @@ -135,7 +135,7 @@ impl Verify for sp_core::ecdsa::Signature { self.as_ref(), &sp_io::hashing::blake2_256(msg.get()), ) { - Ok(pubkey) => signer.as_ref() == &pubkey[..], + Ok(pubkey) => signer.0 == pubkey, _ => false, } } @@ -332,7 +332,7 @@ impl> Morph for MorphInto { } } -/// Implementation of `TryMorph` which attmepts to convert between types using `TryInto`. +/// Implementation of `TryMorph` which attempts to convert between types using `TryInto`. pub struct TryMorphInto(core::marker::PhantomData); impl> TryMorph for TryMorphInto { type Outcome = T; @@ -1451,7 +1451,7 @@ pub trait Dispatchable { /// to represent the dispatch class and weight. type Info; /// Additional information that is returned by `dispatch`. Can be used to supply the caller - /// with information about a `Dispatchable` that is ownly known post dispatch. + /// with information about a `Dispatchable` that is only known post dispatch. type PostInfo: Eq + PartialEq + Clone + Copy + Encode + Decode + Printable; /// Actually dispatch this call and return the result of it. fn dispatch(self, origin: Self::RuntimeOrigin) diff --git a/substrate/primitives/session/Cargo.toml b/substrate/primitives/session/Cargo.toml index 784228c42e9b..cdee4fb03e12 100644 --- a/substrate/primitives/session/Cargo.toml +++ b/substrate/primitives/session/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false } sp-core = { path = "../core", default-features = false } sp-runtime = { path = "../runtime", optional = true } diff --git a/substrate/primitives/staking/Cargo.toml b/substrate/primitives/staking/Cargo.toml index 6304551b8e60..e380abb6a8c0 100644 --- a/substrate/primitives/staking/Cargo.toml +++ b/substrate/primitives/staking/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { features = ["alloc", "derive"], optional = true, workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" sp-core = { path = "../core", default-features = false } diff --git a/substrate/primitives/staking/src/offence.rs b/substrate/primitives/staking/src/offence.rs index d0386e728b37..cb7c7c3c4d0f 100644 --- a/substrate/primitives/staking/src/offence.rs +++ b/substrate/primitives/staking/src/offence.rs @@ -118,7 +118,7 @@ pub trait Offence { /// Errors that may happen on offence reports. #[derive(PartialEq, sp_runtime::RuntimeDebug)] pub enum OffenceError { - /// The report has already been sumbmitted. 
+ /// The report has already been submitted. DuplicateReport, /// Other error has happened. diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index 88aa0bf29289..785838d2b3a3 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -45,7 +45,7 @@ const EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within const BENCHMARKING_FN: &str = "\ This is a special fn only for benchmarking where a database commit happens from the runtime. For that reason client started transactions before calling into runtime are not allowed. - Without client transactions the loop condition garantuees the success of the tx close."; + Without client transactions the loop condition guarantees the success of the tx close."; #[cfg(feature = "std")] fn guard() -> sp_panic_handler::AbortGuard { @@ -723,7 +723,7 @@ impl Encode for EncodeOpaqueValue { } } -/// Auxialiary structure for appending a value to a storage item. +/// Auxiliary structure for appending a value to a storage item. pub(crate) struct StorageAppend<'a>(&'a mut Vec); impl<'a> StorageAppend<'a> { diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index 200cebe68de5..13087431d387 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -1451,7 +1451,7 @@ mod tests { enum Item { InitializationItem, DiscardedItem, - CommitedItem, + CommittedItem, } let key = b"events".to_vec(); @@ -1488,21 +1488,21 @@ mod tests { assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode())); - ext.storage_append(key.clone(), Item::CommitedItem.encode()); + ext.storage_append(key.clone(), Item::CommittedItem.encode()); assert_eq!( ext.storage(key.as_slice()), - Some(vec![Item::InitializationItem, Item::CommitedItem].encode()), + Some(vec![Item::InitializationItem, Item::CommittedItem].encode()), ); } overlay.start_transaction(); - // Then only initlaization item and second (committed) item should persist. + // Then only initialization item and second (committed) item should persist. { let ext = Ext::new(&mut overlay, backend, None); assert_eq!( ext.storage(key.as_slice()), - Some(vec![Item::InitializationItem, Item::CommitedItem].encode()), + Some(vec![Item::InitializationItem, Item::CommittedItem].encode()), ); } } @@ -1866,7 +1866,7 @@ mod tests { // a inner hashable node (&b"k"[..], Some(&long_vec[..])), // need to ensure this is not an inline node - // otherwhise we do not know what is accessed when + // otherwise we do not know what is accessed when // storing proof. (&b"key1"[..], Some(&vec![5u8; 32][..])), (&b"key2"[..], Some(&b"val3"[..])), diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index a25a5b810522..601bc2e29198 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -47,7 +47,7 @@ pub struct NoOpenTransaction; #[cfg_attr(test, derive(PartialEq))] pub struct AlreadyInRuntime; -/// Error when calling `exit_runtime` when not being in runtime exection mdde. +/// Error when calling `exit_runtime` when not being in runtime execution mode. 
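The `storage_append`/`CommittedItem` test above leans on a SCALE property that makes appends cheap: `Vec<T>` encodes as a compact length followed by the items' encodings back to back, so appending only bumps the prefix and concatenates. A quick check, assuming `parity-scale-codec` imported as `codec`:

```rust
use codec::{Compact, Encode};

fn main() {
    let items = vec![1u32, 2, 3];
    // Build the encoding by hand: compact length, then each item.
    let appended = {
        let mut buf = Compact(3u32).encode();
        for i in &items {
            buf.extend(i.encode());
        }
        buf
    };
    assert_eq!(appended, items.encode());
}
```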
#[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] pub struct NotInRuntime; @@ -269,7 +269,7 @@ impl OverlayedMap { /// /// Panics: /// Panics if there are open transactions: `transaction_depth() > 0` - pub fn drain_commited(self) -> impl Iterator<Item = (K, V)> { + pub fn drain_committed(self) -> impl Iterator<Item = (K, V)> { assert!(self.transaction_depth() == 0, "Drain is not allowed with open transactions."); self.changes.into_iter().map(|(k, mut v)| (k, v.pop_transaction().value)) } @@ -281,7 +281,7 @@ impl OverlayedMap { self.dirty_keys.len() } - /// Call this before transfering control to the runtime. + /// Call this before transferring control to the runtime. /// /// This protects all existing transactions from being removed by the runtime. /// Calling this while already inside the runtime will return an error. @@ -471,7 +471,7 @@ mod test { } fn assert_drained_changes(is: OverlayedChangeSet, expected: Changes) { - let is = is.drain_commited().collect::<Vec<_>>(); + let is = is.drain_committed().collect::<Vec<_>>(); let expected = expected .iter() .map(|(k, v)| (k.to_vec(), v.0.map(From::from))) @@ -480,7 +480,7 @@ } fn assert_drained(is: OverlayedChangeSet, expected: Drained) { - let is = is.drain_commited().collect::<Vec<_>>(); + let is = is.drain_committed().collect::<Vec<_>>(); let expected = expected .iter() .map(|(k, v)| (k.to_vec(), v.map(From::from))) @@ -526,7 +526,7 @@ mod test { changeset.set(b"key0".to_vec(), Some(b"val0-rolled".to_vec()), Some(1000)); changeset.set(b"key5".to_vec(), Some(b"val5-rolled".to_vec()), None); - // changes contain all changes not only the commmited ones. + // changes contain all changes not only the committed ones. let all_changes: Changes = vec![ (b"key0", (Some(b"val0-rolled"), vec![1, 10, 1000])), (b"key1", (Some(b"val1"), vec![1])), @@ -807,7 +807,7 @@ fn drain_with_open_transaction_panics() { let mut changeset = OverlayedChangeSet::default(); changeset.start_transaction(); - let _ = changeset.drain_commited(); + let _ = changeset.drain_committed(); } #[test] diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index f49f513967e6..3cd1f9899c1c 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -481,7 +481,7 @@ impl OverlayedChanges { Ok(()) } - /// Call this before transfering control to the runtime. + /// Call this before transferring control to the runtime. /// /// This protects all existing transactions from being removed by the runtime. /// Calling this while already inside the runtime will return an error. @@ -576,10 +576,10 @@ impl OverlayedChanges { }; use core::mem::take; - let main_storage_changes = take(&mut self.top).drain_commited(); + let main_storage_changes = take(&mut self.top).drain_committed(); let child_storage_changes = take(&mut self.children) .into_iter() - .map(|(key, (val, info))| (key, (val.drain_commited(), info))); + .map(|(key, (val, info))| (key, (val.drain_committed(), info))); let offchain_storage_changes = self.offchain_drain_committed().collect(); @@ -810,7 +810,7 @@ pub struct OverlayedExtensions<'a> { #[cfg(feature = "std")] impl<'a> OverlayedExtensions<'a> { - /// Create a new instance of overalyed extensions from the given extensions. + /// Create a new instance of overlaid extensions from the given extensions.
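For context on the `drain_committed` rename above, here is a toy analogue of the transactional overlay it belongs to; the real `OverlayedMap` additionally tracks dirty keys and per-value transaction stacks, which are omitted here:

```rust
use std::collections::HashMap;

#[derive(Default)]
struct Overlay {
    committed: HashMap<Vec<u8>, Vec<u8>>,
    layers: Vec<HashMap<Vec<u8>, Vec<u8>>>, // one map per open transaction
}

impl Overlay {
    fn start_transaction(&mut self) {
        self.layers.push(HashMap::new());
    }

    fn set(&mut self, k: &[u8], v: &[u8]) {
        self.layers
            .last_mut()
            .unwrap_or(&mut self.committed)
            .insert(k.to_vec(), v.to_vec());
    }

    fn rollback_transaction(&mut self) {
        self.layers.pop().expect("no open transaction");
    }

    fn commit_transaction(&mut self) {
        let top = self.layers.pop().expect("no open transaction");
        match self.layers.last_mut() {
            Some(below) => below.extend(top),
            None => self.committed.extend(top),
        }
    }

    // Only legal at transaction depth zero, like the renamed method above.
    fn drain_committed(self) -> impl Iterator<Item = (Vec<u8>, Vec<u8>)> {
        assert!(self.layers.is_empty(), "Drain is not allowed with open transactions.");
        self.committed.into_iter()
    }
}

fn main() {
    let mut overlay = Overlay::default();
    overlay.start_transaction();
    overlay.set(b"kept", b"1");
    overlay.commit_transaction();
    overlay.start_transaction();
    overlay.set(b"dropped", b"2");
    overlay.rollback_transaction();
    let kv: Vec<_> = overlay.drain_committed().collect();
    assert_eq!(kv, vec![(b"kept".to_vec(), b"1".to_vec())]);
}
```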
pub fn new(extensions: &'a mut Extensions) -> Self { Self { extensions: extensions diff --git a/substrate/primitives/state-machine/src/testing.rs b/substrate/primitives/state-machine/src/testing.rs index 0eb7b6d1118f..e19ba95755c1 100644 --- a/substrate/primitives/state-machine/src/testing.rs +++ b/substrate/primitives/state-machine/src/testing.rs @@ -417,7 +417,7 @@ mod tests { original_ext.backend.clone().into_storage(), ); - // Ensure all have the correct ref counrt + // Ensure all have the correct ref count assert!(original_ext.backend.backend_storage().keys().values().all(|r| *r == 2)); // Drain the raw storage and root. diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs index 299cb6b644c0..c7594b3b1b5c 100644 --- a/substrate/primitives/state-machine/src/trie_backend_essence.rs +++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs @@ -35,8 +35,8 @@ use parking_lot::RwLock; use sp_core::storage::{ChildInfo, ChildType, StateVersion}; use sp_trie::{ child_delta_trie_root, delta_trie_root, empty_child_trie_root, - read_child_trie_first_descedant_value, read_child_trie_hash, read_child_trie_value, - read_trie_first_descedant_value, read_trie_value, + read_child_trie_first_descendant_value, read_child_trie_hash, read_child_trie_value, + read_trie_first_descendant_value, read_trie_value, trie_types::{TrieDBBuilder, TrieError}, DBValue, KeySpacedDB, MerkleValue, NodeCodec, PrefixedMemoryDB, Trie, TrieCache, TrieDBRawIterator, TrieRecorder, TrieRecorderProvider, @@ -555,7 +555,7 @@ where let map_e = |e| format!("Trie lookup error: {}", e); self.with_recorder_and_cache(None, |recorder, cache| { - read_trie_first_descedant_value::, _>(self, &self.root, key, recorder, cache) + read_trie_first_descendant_value::, _>(self, &self.root, key, recorder, cache) .map_err(map_e) }) } @@ -571,7 +571,7 @@ where let map_e = |e| format!("Trie lookup error: {}", e); self.with_recorder_and_cache(Some(child_root), |recorder, cache| { - read_child_trie_first_descedant_value::, _>( + read_child_trie_first_descendant_value::, _>( child_info.keyspace(), self, &child_root, diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index 000fcd987040..b36bff69a007 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-core = { path = "../core", default-features = false } sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/statement-store/src/ecies.rs b/substrate/primitives/statement-store/src/ecies.rs index 80a040fd4c8e..6fa16658e007 100644 --- a/substrate/primitives/statement-store/src/ecies.rs +++ b/substrate/primitives/statement-store/src/ecies.rs @@ -148,7 +148,7 @@ mod test { #[test] fn basic_ed25519_encryption() { let (pair, _) = sp_core::ed25519::Pair::generate(); - let pk = pair.into(); + let pk = pair.public(); let plain_message = b"An important secret message"; let encrypted = encrypt_ed25519(&pk, plain_message).unwrap(); 
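The ecies test fix above swaps an ambiguous `into()` for an explicit `pair.public()`. A round-trip sketch, assuming the `encrypt_ed25519`/`decrypt_ed25519` helpers are exported from `sp_statement_store::ecies` the way the test module uses them (an assumption, not confirmed by this diff):

```rust
use sp_core::{ed25519, Pair as _};
use sp_statement_store::ecies::{decrypt_ed25519, encrypt_ed25519}; // assumed paths

fn main() {
    let (pair, _) = ed25519::Pair::generate();
    let pk = pair.public(); // explicit, as in the fixed test
    let plain = b"An important secret message";
    let encrypted = encrypt_ed25519(&pk, plain).unwrap();
    let decrypted = decrypt_ed25519(&pair, &encrypted).unwrap();
    assert_eq!(decrypted, plain.to_vec());
}
```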
diff --git a/substrate/primitives/statement-store/src/lib.rs b/substrate/primitives/statement-store/src/lib.rs index f723880f4e2f..c9092b83784a 100644 --- a/substrate/primitives/statement-store/src/lib.rs +++ b/substrate/primitives/statement-store/src/lib.rs @@ -341,8 +341,8 @@ impl Statement { Some(Proof::OnChain { .. }) | None => SignatureVerificationResult::NoSignature, Some(Proof::Sr25519 { signature, signer }) => { let to_sign = self.signature_material(); - let signature = sp_core::sr25519::Signature(*signature); - let public = sp_core::sr25519::Public(*signer); + let signature = sp_core::sr25519::Signature::from(*signature); + let public = sp_core::sr25519::Public::from(*signer); if signature.verify(to_sign.as_slice(), &public) { SignatureVerificationResult::Valid(*signer) } else { @@ -351,8 +351,8 @@ impl Statement { }, Some(Proof::Ed25519 { signature, signer }) => { let to_sign = self.signature_material(); - let signature = sp_core::ed25519::Signature(*signature); - let public = sp_core::ed25519::Public(*signer); + let signature = sp_core::ed25519::Signature::from(*signature); + let public = sp_core::ed25519::Public::from(*signer); if signature.verify(to_sign.as_slice(), &public) { SignatureVerificationResult::Valid(*signer) } else { @@ -361,8 +361,8 @@ impl Statement { }, Some(Proof::Secp256k1Ecdsa { signature, signer }) => { let to_sign = self.signature_material(); - let signature = sp_core::ecdsa::Signature(*signature); - let public = sp_core::ecdsa::Public(*signer); + let signature = sp_core::ecdsa::Signature::from(*signature); + let public = sp_core::ecdsa::Public::from(*signer); if signature.verify(to_sign.as_slice(), &public) { let sender_hash = ::hash(signer); diff --git a/substrate/primitives/storage/src/lib.rs b/substrate/primitives/storage/src/lib.rs index ea3bef8be7f9..32ab79319dcf 100644 --- a/substrate/primitives/storage/src/lib.rs +++ b/substrate/primitives/storage/src/lib.rs @@ -453,7 +453,7 @@ impl TryFrom for StateVersion { impl StateVersion { /// If defined, values in state of size bigger or equal /// to this threshold will use a separate trie node. - /// Otherwhise, value will be inlined in branch or leaf + /// Otherwise, value will be inlined in branch or leaf /// node. 
pub fn state_value_threshold(&self) -> Option { match self { diff --git a/substrate/primitives/test-primitives/Cargo.toml b/substrate/primitives/test-primitives/Cargo.toml index 1b51c5f59190..051355543156 100644 --- a/substrate/primitives/test-primitives/Cargo.toml +++ b/substrate/primitives/test-primitives/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true } sp-application-crypto = { path = "../application-crypto", default-features = false } sp-core = { path = "../core", default-features = false } diff --git a/substrate/primitives/timestamp/Cargo.toml b/substrate/primitives/timestamp/Cargo.toml index 11afb1755908..5a1d4fcc985c 100644 --- a/substrate/primitives/timestamp/Cargo.toml +++ b/substrate/primitives/timestamp/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } thiserror = { optional = true, workspace = true } sp-inherents = { path = "../inherents", default-features = false } diff --git a/substrate/primitives/tracing/src/lib.rs b/substrate/primitives/tracing/src/lib.rs index 3fdf5a0a1b2f..c074790b0b30 100644 --- a/substrate/primitives/tracing/src/lib.rs +++ b/substrate/primitives/tracing/src/lib.rs @@ -17,7 +17,7 @@ //! Substrate tracing primitives and macros. //! -//! To trace functions or invidual code in Substrate, this crate provides [`within_span`] +//! To trace functions or individual code in Substrate, this crate provides [`within_span`] //! and [`enter_span`]. See the individual docs for how to use these macros. //! //! Note that to allow traces from wasm execution environment there are @@ -68,7 +68,7 @@ pub use crate::types::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; /// directly as they yield nothing without the feature present. Instead you should use /// `enter_span!` and `within_span!` – which would strip away even any parameter conversion /// you do within the span-definition (and thus optimise your performance). For your -/// convineience you directly specify the `Level` and name of the span or use the full +/// convenience you directly specify the `Level` and name of the span or use the full /// feature set of `span!`/`span_*!` on it: /// /// # Example @@ -96,7 +96,7 @@ pub use crate::types::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; /// This project only provides the macros and facilities to manage tracing /// it doesn't implement the tracing subscriber or backend directly – that is /// up to the developer integrating it into a specific environment. In native -/// this can and must be done through the regular `tracing`-facitilies, please +/// this can and must be done through the regular `tracing`-facilities, please /// see their documentation for details. /// /// On the wasm-side we've adopted a similar approach of having a global @@ -137,7 +137,7 @@ pub fn init_for_tests() { /// Runs given code within a tracing span, measuring it's execution time. 
/// /// If tracing is not enabled, the code is still executed. Pass in level and name or -/// use any valid `sp_tracing::Span`followe by `;` and the code to execute, +/// use any valid `sp_tracing::Span`followed by `;` and the code to execute, /// /// # Example /// diff --git a/substrate/primitives/tracing/src/types.rs b/substrate/primitives/tracing/src/types.rs index 77c8878a79cc..1141c3b58f4f 100644 --- a/substrate/primitives/tracing/src/types.rs +++ b/substrate/primitives/tracing/src/types.rs @@ -18,7 +18,7 @@ #[cfg(not(feature = "std"))] use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; -/// Types for wasm based tracing. Loosly inspired by `tracing-core` but +/// Types for wasm based tracing. Loosely inspired by `tracing-core` but /// optimised for the specific use case. use core::{fmt::Debug, format_args}; @@ -55,7 +55,7 @@ impl core::default::Default for WasmLevel { } } -/// A paramter value provided to the span/event +/// A parameter value provided to the span/event #[derive(Encode, Decode, Clone)] pub enum WasmValue { U8(u8), @@ -181,9 +181,9 @@ impl From for WasmValue { } } -/// The name of a field provided as the argument name when contstructing an +/// The name of a field provided as the argument name when constructing an /// `event!` or `span!`. -/// Generally generated automaticaly via `stringify` from an `'static &str`. +/// Generally generated automatically via `stringify` from an `'static &str`. /// Likely print-able. #[derive(Encode, Decode, Clone)] pub struct WasmFieldName(Vec); @@ -321,7 +321,7 @@ impl tracing_core::field::Visit for WasmValuesSet { self.0.push((field.name().into(), Some(WasmValue::from(value)))) } } -/// Metadata provides generic information about the specifc location of the +/// Metadata provides generic information about the specific location of the /// `span!` or `event!` call on the wasm-side. #[derive(Encode, Decode, Clone)] pub struct WasmMetadata { diff --git a/substrate/primitives/transaction-storage-proof/Cargo.toml b/substrate/primitives/transaction-storage-proof/Cargo.toml index fbd0a4752fca..137a232fce73 100644 --- a/substrate/primitives/transaction-storage-proof/Cargo.toml +++ b/substrate/primitives/transaction-storage-proof/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-core = { path = "../core", optional = true } sp-inherents = { path = "../inherents", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/transaction-storage-proof/src/lib.rs b/substrate/primitives/transaction-storage-proof/src/lib.rs index 352f2aec26ee..893b2e33bee6 100644 --- a/substrate/primitives/transaction-storage-proof/src/lib.rs +++ b/substrate/primitives/transaction-storage-proof/src/lib.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Storage proof primitives. Constains types and basic code to extract storage +//! Storage proof primitives. Contains types and basic code to extract storage //! proofs for indexed transactions. 
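Putting the `within_span!` docs fixed above into practice; a minimal sketch assuming `sp-tracing` as a dependency and the macro forms shown in its own documentation (the block still executes when tracing is compiled out):

```rust
use sp_tracing::{within_span, Level};

fn process(items: &[u32]) -> u32 {
    within_span! {
        Level::TRACE, "process";
        // Traced when a subscriber is installed, plain execution otherwise.
        items.iter().copied().sum()
    }
}

fn main() {
    assert_eq!(process(&[1, 2, 3]), 6);
}
```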
#![cfg_attr(not(feature = "std"), no_std)] diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index dd7ab080e5f4..28c496d7a8e0 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -29,7 +29,7 @@ memory-db = { version = "0.32.0", default-features = false } nohash-hasher = { version = "0.2.0", optional = true } parking_lot = { version = "0.12.1", optional = true } rand = { version = "0.8", optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } thiserror = { optional = true, workspace = true } tracing = { version = "0.1.29", optional = true } trie-db = { version = "0.28.0", default-features = false } diff --git a/substrate/primitives/trie/src/cache/mod.rs b/substrate/primitives/trie/src/cache/mod.rs index 01f08a78adcf..32078169b503 100644 --- a/substrate/primitives/trie/src/cache/mod.rs +++ b/substrate/primitives/trie/src/cache/mod.rs @@ -323,7 +323,7 @@ type ValueAccessSet = /// /// This cache should be used per state instance created by the backend. One state instance is /// referring to the state of one block. It will cache all the accesses that are done to the state -/// which could not be fullfilled by the [`SharedTrieCache`]. These locally cached items are merged +/// which could not be fulfilled by the [`SharedTrieCache`]. These locally cached items are merged /// back to the shared trie cache when this instance is dropped. /// /// When using [`Self::as_trie_db_cache`] or [`Self::as_trie_db_mut_cache`], it will lock Mutexes. diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index a9904b1a59ed..62bb24082362 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -327,7 +327,7 @@ pub fn read_trie_value( +pub fn read_trie_first_descendant_value( db: &DB, root: &TrieHash, key: &[u8], @@ -448,7 +448,7 @@ where /// Read the [`trie_db::MerkleValue`] of the node that is the closest descendant for /// the provided child key. 
-pub fn read_child_trie_first_descedant_value<L: TrieConfiguration, DB>( +pub fn read_child_trie_first_descendant_value<L: TrieConfiguration, DB>( keyspace: &[u8], db: &DB, root: &TrieHash<L>, diff --git a/substrate/primitives/version/Cargo.toml index c1f109a84134..7c3b911032ff 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", default-features = false, optional = true } parity-wasm = { version = "0.45", optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } thiserror = { optional = true, workspace = true } sp-crypto-hashing-proc-macro = { path = "../crypto/hashing/proc-macro" } diff --git a/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs index 7ca2d9b71f60..3671d4aff6bb 100644 --- a/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs +++ b/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs @@ -52,7 +52,7 @@ fn decl_runtime_version_impl_inner(item: ItemConst) -> Result { /// enable `std` feature even for `no_std` wasm runtime builds. /// /// One difference from the original definition is the `apis` field. Since we don't actually parse -/// `apis` from this macro it will always be emitteed as empty. An empty vector can be encoded as +/// `apis` from this macro it will always be emitted as empty. An empty vector can be encoded as /// a zero-byte, thus `u8` is sufficient here. #[derive(Encode)] struct RuntimeVersion { diff --git a/substrate/primitives/version/src/lib.rs index 6b4a2061c753..dd24e427b837 100644 --- a/substrate/primitives/version/src/lib.rs +++ b/substrate/primitives/version/src/lib.rs @@ -327,7 +327,7 @@ impl RuntimeVersion { /// /// For runtime with core api version less than 4, /// V0 trie version will be applied to state. - /// Otherwhise, V1 trie version will be use. + /// Otherwise, V1 trie version will be used. pub fn state_version(&self) -> StateVersion { // If version > than 1, keep using latest version.
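The doc comment corrected above claims an empty vector can be encoded as a single zero byte, which is why a `u8` placeholder works for the always-empty `apis` field. That is quick to verify, assuming `parity-scale-codec` imported as `codec`:

```rust
use codec::{Compact, Encode};

fn main() {
    let empty: Vec<([u8; 8], u32)> = Vec::new(); // shape of an api-version entry
    assert_eq!(empty.encode(), vec![0u8]); // compact length 0 is the byte 0x00
    assert_eq!(Compact(0u32).encode(), vec![0u8]);
    assert_eq!(0u8.encode(), vec![0u8]); // so `0u8` is wire-compatible
}
```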
self.state_version.try_into().unwrap_or(StateVersion::V1) diff --git a/substrate/primitives/wasm-interface/Cargo.toml b/substrate/primitives/wasm-interface/Cargo.toml index 6c051b71c8e0..15a20fab5e5d 100644 --- a/substrate/primitives/wasm-interface/Cargo.toml +++ b/substrate/primitives/wasm-interface/Cargo.toml @@ -21,9 +21,9 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = impl-trait-for-tuples = "0.2.2" log = { optional = true, workspace = true, default-features = true } wasmtime = { version = "8.0.1", default-features = false, optional = true } -anyhow = { version = "1.0.68", optional = true } +anyhow = { version = "1.0.81", optional = true } [features] default = ["std"] -std = ["codec/std", "log/std", "wasmtime"] +std = ["codec/std", "log/std"] wasmtime = ["anyhow", "dep:wasmtime"] diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index 2aca4aecfe7f..d9bc14649968 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bounded-collections = { version = "0.2.0", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } smallvec = "1.11.0" sp-arithmetic = { path = "../arithmetic", default-features = false } diff --git a/substrate/test-utils/Cargo.toml b/substrate/test-utils/Cargo.toml index af8b01cdef08..56b1c038199a 100644 --- a/substrate/test-utils/Cargo.toml +++ b/substrate/test-utils/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.16" +futures = "0.3.30" tokio = { version = "1.22.0", features = ["macros", "time"] } [dev-dependencies] diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index 349b04d32d7b..4e4e65314fc3 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } sc-client-api = { path = "../../client/api" } diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index f49503da8ca5..ffbd59f39ad2 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -22,7 +22,7 @@ sp-consensus-babe = { path = "../../primitives/consensus/babe", default-features sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } sp-block-builder = { path = "../../primitives/block-builder", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-inherents = { path = "../../primitives/inherents", default-features = false } 
sp-keyring = { path = "../../primitives/keyring", default-features = false } sp-offchain = { path = "../../primitives/offchain", default-features = false } @@ -53,7 +53,7 @@ array-bytes = { version = "6.1", optional = true } log = { workspace = true } [dev-dependencies] -futures = "0.3.21" +futures = "0.3.30" sc-block-builder = { path = "../../client/block-builder" } sc-chain-spec = { path = "../../client/chain-spec" } sc-executor = { path = "../../client/executor" } diff --git a/substrate/test-utils/runtime/client/Cargo.toml b/substrate/test-utils/runtime/client/Cargo.toml index cbb964f67852..5ca24fea33ed 100644 --- a/substrate/test-utils/runtime/client/Cargo.toml +++ b/substrate/test-utils/runtime/client/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.21" +futures = "0.3.30" sc-block-builder = { path = "../../../client/block-builder" } sc-client-api = { path = "../../../client/api" } sc-consensus = { path = "../../../client/consensus/common" } diff --git a/substrate/test-utils/runtime/transaction-pool/Cargo.toml b/substrate/test-utils/runtime/transaction-pool/Cargo.toml index 33e56e8e55e7..9b52706c7399 100644 --- a/substrate/test-utils/runtime/transaction-pool/Cargo.toml +++ b/substrate/test-utils/runtime/transaction-pool/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" parking_lot = "0.12.1" thiserror = { workspace = true } sc-transaction-pool = { path = "../../../client/transaction-pool" } diff --git a/substrate/utils/binary-merkle-tree/Cargo.toml b/substrate/utils/binary-merkle-tree/Cargo.toml index 6ba515afee17..a89006d94dc1 100644 --- a/substrate/utils/binary-merkle-tree/Cargo.toml +++ b/substrate/utils/binary-merkle-tree/Cargo.toml @@ -18,7 +18,7 @@ hash-db = { version = "0.16.0", default-features = false } [dev-dependencies] array-bytes = "6.1" -env_logger = "0.9" +env_logger = "0.11" sp-core = { path = "../../primitives/core" } sp-runtime = { path = "../../primitives/runtime" } diff --git a/substrate/utils/fork-tree/src/lib.rs b/substrate/utils/fork-tree/src/lib.rs index cd175166b9cd..ff86467c85d5 100644 --- a/substrate/utils/fork-tree/src/lib.rs +++ b/substrate/utils/fork-tree/src/lib.rs @@ -683,7 +683,7 @@ where node.data }); - // Retain only roots that are descendents of the finalized block (this + // Retain only roots that are descendants of the finalized block (this // happens if the node has been properly finalized) or that are // ancestors (or equal) to the finalized block (in this case the node // wasn't finalized earlier presumably because the predicate didn't @@ -1168,7 +1168,7 @@ mod test { Ok(Some(false)), ); - // finalizing "E" is not allowed since there are not finalized anchestors. + // finalizing "E" is not allowed since there are not finalized ancestors. assert_eq!( tree.finalizes_any_with_descendent_if(&"E", 15, &is_descendent_of, |c| c.effective == 10), @@ -1309,7 +1309,7 @@ mod test { fn map_works() { let (mut tree, _) = test_fork_tree(); - // Extend the single root fork-tree to also excercise the roots order during map. + // Extend the single root fork-tree to also exercise the roots order during map. 
let is_descendent_of = |_: &&str, _: &&str| -> Result { Ok(false) }; let is_root = tree.import("A1", 10, 1, &is_descendent_of).unwrap(); assert!(is_root); diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs index c6ba28240167..5fbfc0530bb3 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -48,7 +48,7 @@ use std::{ }; /// Logging target -const LOG_TARGET: &'static str = "frame::benchmark::pallet"; +const LOG_TARGET: &'static str = "polkadot_sdk_frame::benchmark::pallet"; /// The inclusive range of a component. #[derive(Serialize, Debug, Clone, Eq, PartialEq)] @@ -415,7 +415,7 @@ impl PalletCmd { .map_err(|e| { format!("Error executing and verifying runtime benchmark: {}", e) })?; - // Dont use these results since verification code will add overhead. + // Don't use these results since verification code will add overhead. let _batch = , String> as Decode>::decode( &mut &result[..], @@ -437,7 +437,7 @@ impl PalletCmd { &pallet.clone(), &extrinsic.clone(), &selected_components.clone(), - false, // dont run verification code for final values + false, // don't run verification code for final values self.repeat, ) .encode(), @@ -469,7 +469,7 @@ impl PalletCmd { &pallet.clone(), &extrinsic.clone(), &selected_components.clone(), - false, // dont run verification code for final values + false, // don't run verification code for final values self.repeat, ) .encode(), diff --git a/substrate/utils/frame/generate-bags/src/lib.rs b/substrate/utils/frame/generate-bags/src/lib.rs index 923017261a44..62485c442d36 100644 --- a/substrate/utils/frame/generate-bags/src/lib.rs +++ b/substrate/utils/frame/generate-bags/src/lib.rs @@ -183,7 +183,7 @@ pub fn generate_thresholds( total_issuance: u128, minimum_balance: u128, ) -> Result<(), std::io::Error> { - // ensure the file is accessable + // ensure the file is accessible if let Some(parent) = output.parent() { if !parent.exists() { std::fs::create_dir_all(parent)?; diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml index 61e0a861ee0e..82b019154832 100644 --- a/substrate/utils/frame/remote-externalities/Cargo.toml +++ b/substrate/utils/frame/remote-externalities/Cargo.toml @@ -26,7 +26,7 @@ sp-io = { path = "../../../primitives/io" } sp-runtime = { path = "../../../primitives/runtime" } tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] } substrate-rpc-client = { path = "../rpc/client" } -futures = "0.3" +futures = "0.3.30" indicatif = "0.17.7" spinners = "4.1.0" tokio-retry = "0.3.0" diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index c7399468da9d..254d33deec80 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -762,7 +762,7 @@ where let mut sp = Spinner::with_timer(Spinners::Dots, "Inserting keys into DB...".into()); let start = Instant::now(); pending_ext.batch_insert(key_values.clone().into_iter().filter_map(|(k, v)| { - // Don't insert the child keys here, they need to be inserted seperately with all their + // Don't insert the child keys here, they need to be inserted separately with all their // data in the load_child_remote function. 
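The fork-tree tests above stub `is_descendent_of` with constant closures; against real data it is an ancestry walk. A toy oracle over a parent map, illustrative rather than matching the test's exact closure signature:

```rust
use std::collections::HashMap;

// Walk parent links from `head` and report whether `base` is an ancestor.
fn is_descendent_of(parents: &HashMap<&str, &str>, base: &str, head: &str) -> bool {
    let mut cur = head;
    while let Some(parent) = parents.get(cur) {
        if *parent == base {
            return true;
        }
        cur = *parent;
    }
    false
}

fn main() {
    // A -> B -> C -> D, stored child -> parent.
    let parents: HashMap<&str, &str> =
        [("B", "A"), ("C", "B"), ("D", "C")].into_iter().collect();
    assert!(is_descendent_of(&parents, "A", "D"));
    assert!(!is_descendent_of(&parents, "D", "A"));
}
```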
match is_default_child_storage_key(&k.0) { true => None, diff --git a/substrate/utils/frame/rpc/client/Cargo.toml b/substrate/utils/frame/rpc/client/Cargo.toml index b51e3f44f4e6..501bb95b2579 100644 --- a/substrate/utils/frame/rpc/client/Cargo.toml +++ b/substrate/utils/frame/rpc/client/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.22", features = ["ws-client"] } sc-rpc-api = { path = "../../../../client/rpc-api" } -async-trait = "0.1.74" +async-trait = "0.1.79" serde = { workspace = true, default-features = true } sp-runtime = { path = "../../../../primitives/runtime" } log = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs b/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs index f45258ea593d..c0333bb7dac0 100644 --- a/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs +++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs @@ -132,7 +132,7 @@ pub trait StateMigrationApi { /// Check current migration state. /// /// This call is performed locally without submitting any transactions. Thus executing this - /// won't change any state. Nonetheless it is a VERY costy call that should be + /// won't change any state. Nonetheless it is a VERY costly call that should be /// only exposed to trusted peers. #[method(name = "state_trieMigrationStatus")] fn call(&self, at: Option) -> RpcResult; diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml index 2e4bb6a10578..84db06da7b0d 100644 --- a/substrate/utils/frame/rpc/support/Cargo.toml +++ b/substrate/utils/frame/rpc/support/Cargo.toml @@ -23,9 +23,9 @@ sc-rpc-api = { path = "../../../../client/rpc-api" } sp-storage = { path = "../../../../primitives/storage" } [dev-dependencies] -scale-info = "2.10.0" +scale-info = "2.11.1" jsonrpsee = { version = "0.22", features = ["jsonrpsee-types", "ws-client"] } -tokio = "1.22.0" +tokio = "1.37" sp-core = { path = "../../../../primitives/core" } sp-runtime = { path = "../../../../primitives/runtime" } frame-system = { path = "../../../../frame/system" } diff --git a/substrate/utils/frame/rpc/support/src/lib.rs b/substrate/utils/frame/rpc/support/src/lib.rs index a839bbc34021..dea822167ff3 100644 --- a/substrate/utils/frame/rpc/support/src/lib.rs +++ b/substrate/utils/frame/rpc/support/src/lib.rs @@ -161,7 +161,7 @@ impl StorageQuery { /// Send this query over RPC, await the typed result. /// - /// Hash should be `::Hash`. + /// Hash should be `::Hash`. 
/// /// # Arguments /// diff --git a/substrate/utils/frame/rpc/system/Cargo.toml b/substrate/utils/frame/rpc/system/Cargo.toml index f9a84a01af82..9e571bef9d04 100644 --- a/substrate/utils/frame/rpc/system/Cargo.toml +++ b/substrate/utils/frame/rpc/system/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } frame-system-rpc-runtime-api = { path = "../../../../frame/system/rpc/runtime-api" } sc-rpc-api = { path = "../../../../client/rpc-api" } @@ -31,7 +31,7 @@ sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] sc-transaction-pool = { path = "../../../../client/transaction-pool" } -tokio = "1.22.0" +tokio = "1.37" assert_matches = "1.3.0" sp-tracing = { path = "../../../../primitives/tracing" } substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" } diff --git a/substrate/utils/frame/try-runtime/cli/Cargo.toml b/substrate/utils/frame/try-runtime/cli/Cargo.toml index 3ff57745a2db..618cb645475d 100644 --- a/substrate/utils/frame/try-runtime/cli/Cargo.toml +++ b/substrate/utils/frame/try-runtime/cli/Cargo.toml @@ -37,7 +37,7 @@ sp-weights = { path = "../../../../primitives/weights" } frame-try-runtime = { path = "../../../../frame/try-runtime", optional = true } substrate-rpc-client = { path = "../../rpc/client" } -async-trait = "0.1.74" +async-trait = "0.1.79" clap = { version = "4.5.3", features = ["derive"] } hex = { version = "0.4.3", default-features = false } log = { workspace = true, default-features = true } @@ -52,7 +52,7 @@ node-primitives = { path = "../../../../bin/node/primitives" } regex = "1.7.3" substrate-cli-test-utils = { path = "../../../../test-utils/cli" } tempfile = "3.1.0" -tokio = "1.27.0" +tokio = "1.37" [features] try-runtime = [ diff --git a/substrate/utils/substrate-bip39/README.md b/substrate/utils/substrate-bip39/README.md index 368d18f406bb..e7a80ca5f2cf 100644 --- a/substrate/utils/substrate-bip39/README.md +++ b/substrate/utils/substrate-bip39/README.md @@ -21,12 +21,12 @@ to wallets providing their own dictionaries and checksum mechanism. Issues with to CSPRNG supplied dictionary phrases. 2. Providing own dictionaries felt into the _you ain't gonna need it_ anti-pattern category on day 1. Wallet providers - (be it hardware or software) typically want their products to be compatibile with other wallets so that users can + (be it hardware or software) typically want their products to be compatible with other wallets so that users can migrate to their product without having to migrate all their assets. To achieve the above phrases have to be precisely encoded in _The One True Canonical Encoding_, for which UTF-8 NFKD was chosen. This is largely irrelevant (and even ignored) for English phrases, as they encode to basically just ASCII in -virtualy every character encoding known to mankind, but immedietly becomes a problem for dictionaries that do use +virtually every character encoding known to mankind, but immediately becomes a problem for dictionaries that do use non-ASCII characters. 
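(Editor's illustrative aside, not part of the patch: the NFKD point made in the README hunk above is easy to see concretely. The sketch below assumes the third-party `unicode-normalization` crate; the Spanish word and the crate choice are illustrative assumptions, not anything from this diff.)

```rust
// Why BIP39 mandates UTF-8 NFKD: the same visible word can arrive with a
// precomposed "ñ" (U+00F1) or as "n" plus a combining tilde (U+0303).
// Byte-wise the two spellings differ; after NFKD they are identical.
use unicode_normalization::UnicodeNormalization;

fn canonical(word: &str) -> String {
    // NFKD-normalize into the canonical form that implementations compare.
    word.nfkd().collect()
}

fn main() {
    let precomposed = "ni\u{00F1}o"; // "niño" with a precomposed ñ
    let combining = "nin\u{0303}o"; // "niño" as n + combining tilde
    assert_ne!(precomposed, combining); // the raw bytes disagree...
    assert_eq!(canonical(precomposed), canonical(combining)); // ...NFKD agrees
}
```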
Even if the right encoding is used and implemented correctly, there are still [other caveats present for some non-english dictionaries](https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md), such as normalizing spaces to a canonical form, or making some latin based characters equivalent to their base in @@ -34,8 +34,8 @@ dictionary lookups (eg. Spanish `ñ` and `n` are meant to be interchangeable). T headache, and opens doors for disagreements between buggy implementations, breaking compatibility. BIP39 does already provide a form of the mnemonic that is free from all of these issues: the entropy byte array. Since -veryfing the checksum requires that we recover the entropy from which the phrase was generated, no extra work is -actually needed here. Wallet implementators can encode the dictionaries in whatever encoding they find convenient (as +verifying the checksum requires that we recover the entropy from which the phrase was generated, no extra work is +actually needed here. Wallet implementors can encode the dictionaries in whatever encoding they find convenient (as long as they are the standard BIP39 dictionaries), no harm in using UTF-16 string primitives that Java and JavaScript provide. Since the dictionary is fixed and known, and the checksum is done on the entropy itself, the exact character encoding used becomes irrelevant, as are the precise codepoints and amount of whitespace around the words. It is thus diff --git a/substrate/utils/substrate-bip39/src/lib.rs b/substrate/utils/substrate-bip39/src/lib.rs index 3673d20faed6..5b68bef0c390 100644 --- a/substrate/utils/substrate-bip39/src/lib.rs +++ b/substrate/utils/substrate-bip39/src/lib.rs @@ -43,7 +43,7 @@ pub enum Error { /// /// Any other length will return an error. /// -/// `password` is analog to BIP39 seed generation itself, with an empty string being defalt. +/// `password` is analogous to BIP39 seed generation itself, with an empty string being the default. pub fn mini_secret_from_entropy(entropy: &[u8], password: &str) -> Result<MiniSecretKey, Error> { let seed = seed_from_entropy(entropy, password)?; Ok(MiniSecretKey::from_bytes(&seed[..32]).expect("Length is always correct; qed")) } diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml index 7abd1a202848..bac323e2e6a0 100644 --- a/substrate/utils/wasm-builder/Cargo.toml +++ b/substrate/utils/wasm-builder/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] build-helper = "0.1.1" cargo_metadata = "0.15.4" console = "0.15.8" -strum = { version = "0.24.1", features = ["derive"] } +strum = { version = "0.26.2", features = ["derive"] } tempfile = "3.1.0" toml = "0.8.8" walkdir = "2.4.0" diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index 5cde48c0950b..178e499e8f5b 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -207,7 +207,7 @@ fn get_cargo_command(target: RuntimeTarget) -> CargoCommand { } else { // If no command before provided us with a cargo that supports our Substrate wasm env, we // try to search one with rustup. If that fails as well, we return the default cargo and let - // the prequisities check fail. + // the prerequisites check fail.
get_rustup_command(target).unwrap_or(default_cargo) } } diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index 98b6b302e8e0..5f813512cbaa 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -694,7 +694,7 @@ impl BuildConfiguration { /// "production". It would only contain the builtin profile where the custom profile /// inherits from. This is why we inspect the build path to learn which profile is used. /// - /// When not overriden by a env variable we always default to building wasm with the `Release` + /// When not overridden by an env variable we always default to building wasm with the `Release` /// profile even when the main build uses the debug build. This is because wasm built with the /// `Debug` profile is too slow for normal development activities and almost never intended. /// @@ -703,9 +703,9 @@ impl BuildConfiguration { /// /// # Note /// - /// Can be overriden by setting [`crate::WASM_BUILD_TYPE_ENV`]. + /// Can be overridden by setting [`crate::WASM_BUILD_TYPE_ENV`]. fn detect(target: RuntimeTarget, wasm_project: &Path) -> Self { - let (name, overriden) = if let Ok(name) = env::var(crate::WASM_BUILD_TYPE_ENV) { + let (name, overridden) = if let Ok(name) = env::var(crate::WASM_BUILD_TYPE_ENV) { (name, true) } else { // First go backwards to the beginning of the target directory. @@ -730,14 +730,14 @@ impl BuildConfiguration { (name, false) }; let outer_build_profile = Profile::iter().find(|p| p.directory() == name); - let blob_build_profile = match (outer_build_profile.clone(), overriden) { - // When not overriden by a env variable we default to using the `Release` profile + let blob_build_profile = match (outer_build_profile.clone(), overridden) { + // When not overridden by an env variable we default to using the `Release` profile // for the wasm build even when the main build uses the debug build. This // is because the `Debug` profile is too slow for normal development activities. (Some(Profile::Debug), false) => Profile::Release, - // For any other profile or when overriden we take it at face value. + // For any other profile or when overridden we take it at face value. (Some(profile), _) => profile, - // For non overriden unknown profiles we fall back to `Release`. + // For non-overridden unknown profiles we fall back to `Release`. // This allows us to continue building when a custom profile is used for the // main builds cargo. When explicitly passing a profile via env variable we are // not doing a fallback. diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml index 65aa1e207520..606fd0580356 100644 --- a/templates/minimal/node/Cargo.toml +++ b/templates/minimal/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "minimal-template-node" -description = "A miniaml Substrate-based Substrate node, ready for hacking." +description = "A minimal Substrate-based Substrate node, ready for hacking."
version = "0.0.0" license = "MIT-0" authors.workspace = true @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { version = "4.5.3", features = ["derive"] } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } futures-timer = "3.0.1" jsonrpsee = { version = "0.22", features = ["server"] } serde_json = { workspace = true, default-features = true } @@ -49,7 +49,7 @@ substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system # Once the native runtime is gone, there should be little to no dependency on FRAME here, and # certainly no dependency on the runtime. -frame = { path = "../../../substrate/frame", features = [ +frame = { package = "polkadot-sdk-frame", path = "../../../substrate/frame", features = [ "experimental", "runtime", ] } diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml index 9982e5ea53bc..909ba0344548 100644 --- a/templates/minimal/pallets/template/Cargo.toml +++ b/templates/minimal/pallets/template/Cargo.toml @@ -19,10 +19,10 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ], default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } -frame = { path = "../../../../substrate/frame", default-features = false, features = [ +frame = { package = "polkadot-sdk-frame", path = "../../../../substrate/frame", default-features = false, features = [ "experimental", "runtime", ] } diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml index 20ffb706eb49..e7f88ca47af7 100644 --- a/templates/minimal/runtime/Cargo.toml +++ b/templates/minimal/runtime/Cargo.toml @@ -17,7 +17,7 @@ parity-scale-codec = { version = "3.0.0", default-features = false } scale-info = { version = "2.6.0", default-features = false } # this is a frame-based runtime, thus importing `frame` with runtime feature enabled. 
-frame = { path = "../../../substrate/frame", default-features = false, features = [ +frame = { package = "polkadot-sdk-frame", path = "../../../substrate/frame", default-features = false, features = [ "experimental", "runtime", ] } @@ -29,7 +29,7 @@ pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-featur pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -# genesis builder that allows us to interacto with runtime genesis config +# genesis builder that allows us to interact with runtime genesis config sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } # local pallet templates diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 4dd24803e9b1..7e7bf1726b5e 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -6,8 +6,8 @@ use std::{sync::Arc, time::Duration}; use cumulus_client_cli::CollatorOptions; // Local Runtime Types use parachain_template_runtime::{ + apis::RuntimeApi, opaque::{Block, Hash}, - RuntimeApi, }; // Cumulus Imports @@ -16,7 +16,8 @@ use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImpo use cumulus_client_consensus_proposer::Proposer; use cumulus_client_service::{ build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks, - BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, StartRelayChainTasksParams, + BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, ParachainHostFunctions, + StartRelayChainTasksParams, }; use cumulus_primitives_core::{relay_chain::CollatorPair, ParaId}; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; @@ -25,9 +26,7 @@ use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; use sc_client_api::Backend; use sc_consensus::ImportQueue; -use sc_executor::{ - HeapAllocStrategy, NativeElseWasmExecutor, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY, -}; +use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; use sc_network::NetworkBlock; use sc_network_sync::SyncingService; use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}; @@ -36,25 +35,7 @@ use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_keystore::KeystorePtr; use substrate_prometheus_endpoint::Registry; -/// Native executor type. 
-pub struct ParachainNativeExecutor; - -impl sc_executor::NativeExecutionDispatch for ParachainNativeExecutor { - type ExtendHostFunctions = ( - cumulus_client_service::storage_proof_size::HostFunctions, - frame_benchmarking::benchmarking::HostFunctions, - ); - - fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> { - parachain_template_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - parachain_template_runtime::native_version() - } -} - -type ParachainExecutor = NativeElseWasmExecutor<ParachainNativeExecutor>; +type ParachainExecutor = WasmExecutor<ParachainHostFunctions>; type ParachainClient = TFullClient<Block, RuntimeApi, ParachainExecutor>; @@ -92,7 +73,7 @@ pub fn new_partial(config: &Configuration) -> Result<Service, sc_service::Error> .default_heap_pages .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { extra_pages: h as _ }); - let wasm = WasmExecutor::builder() + let executor = ParachainExecutor::builder() .with_execution_method(config.wasm_method) .with_onchain_heap_alloc_strategy(heap_pages) .with_offchain_heap_alloc_strategy(heap_pages) @@ -100,8 +81,6 @@ pub fn new_partial(config: &Configuration) -> Result<Service, sc_service::Error> .with_runtime_cache_size(config.runtime_cache_size) .build(); - let executor = ParachainExecutor::new_with_wasm_executor(wasm); - let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts_record_import::<Block, RuntimeApi, _>( config, diff --git a/templates/parachain/pallets/template/Cargo.toml b/templates/parachain/pallets/template/Cargo.toml index 89eb9d517163..199da2f12d2c 100644 --- a/templates/parachain/pallets/template/Cargo.toml +++ b/templates/parachain/pallets/template/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index b6a8e60e5ae1..2e26ae1d9898 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -24,7 +24,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = ] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } smallvec = "1.11.0" diff --git a/templates/parachain/runtime/src/apis.rs b/templates/parachain/runtime/src/apis.rs new file mode 100644 index 000000000000..74c7476e1520 --- /dev/null +++ b/templates/parachain/runtime/src/apis.rs @@ -0,0 +1,275 @@ +// This is free and unencumbered software released into the public domain. +// +// Anyone is free to copy, modify, publish, use, compile, sell, or +// distribute this software, either in source code form or as a compiled +// binary, for any purpose, commercial or non-commercial, and by any +// means. +// +// In jurisdictions that recognize copyright laws, the author or authors +// of this software dedicate any and all copyright interest in the +// software to the public domain. We make this dedication for the benefit +// of the public at large and to the detriment of our heirs and +// successors. We intend this dedication to be an overt act of +// relinquishment in perpetuity of all present and future rights to this +// software under copyright law.
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. +// +// For more information, please refer to + +// External crates imports +use frame_support::{ + genesis_builder_helper::{build_config, create_default_config}, + weights::Weight, +}; +use pallet_aura::Authorities; +use sp_api::impl_runtime_apis; +use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + traits::Block as BlockT, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, +}; +use sp_std::prelude::Vec; +use sp_version::RuntimeVersion; + +// Local module imports +use super::{ + AccountId, Aura, Balance, Block, Executive, InherentDataExt, Nonce, ParachainSystem, Runtime, + RuntimeCall, RuntimeGenesisConfig, SessionKeys, System, TransactionPayment, VERSION, +}; + +impl_runtime_apis! { + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Authorities::::get().into_inner() + } + } + + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + System::account_nonce(account) + } + } + + impl 
pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { + ParachainSystem::collect_collation_info(header) + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { + use super::configs::RuntimeBlockWeights; + + let weight = Executive::try_runtime_upgrade(checks).unwrap(); + (weight, RuntimeBlockWeights::get().max_block) + } + + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + select: frame_try_runtime::TryStateSelect, + ) -> Weight { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. 
+ Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use super::*; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{BenchmarkError, Benchmarking, BenchmarkBatch}; + use super::*; + + use frame_system_benchmarking::Pallet as SystemBench; + impl frame_system_benchmarking::Config for Runtime { + fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); + Ok(()) + } + + fn verify_set_code() { + System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); + } + } + + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + impl cumulus_pallet_session_benchmarking::Config for Runtime {} + + use frame_support::traits::WhitelistedStorageKeys; + let whitelist = AllPalletsWithSystem::whitelisted_storage_keys(); + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + add_benchmarks!(params, batches); + + if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } + Ok(batches) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } +} diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs new file mode 100644 index 000000000000..e2c51e07d37b --- /dev/null +++ b/templates/parachain/runtime/src/configs/mod.rs @@ -0,0 +1,311 @@ +// This is free and unencumbered software released into the public domain. +// +// Anyone is free to copy, modify, publish, use, compile, sell, or +// distribute this software, either in source code form or as a compiled +// binary, for any purpose, commercial or non-commercial, and by any +// means. +// +// In jurisdictions that recognize copyright laws, the author or authors +// of this software dedicate any and all copyright interest in the +// software to the public domain. We make this dedication for the benefit +// of the public at large and to the detriment of our heirs and +// successors. We intend this dedication to be an overt act of +// relinquishment in perpetuity of all present and future rights to this +// software under copyright law. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. 
+// +// For more information, please refer to + +mod xcm_config; + +// Substrate and Polkadot dependencies +use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use frame_support::{ + derive_impl, + dispatch::DispatchClass, + parameter_types, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, + weights::{ConstantMultiplier, Weight}, + PalletId, +}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + EnsureRoot, +}; +use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; +use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling}; +use polkadot_runtime_common::{ + xcm_sender::NoPriceForMessageDelivery, BlockHashCount, SlowAdjustingFeeUpdate, +}; +use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_runtime::Perbill; +use sp_version::RuntimeVersion; +use xcm::latest::prelude::BodyId; + +// Local module imports +use super::{ + weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}, + AccountId, Aura, Balance, Balances, Block, BlockNumber, CollatorSelection, Hash, MessageQueue, + Nonce, PalletInfo, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent, RuntimeFreezeReason, + RuntimeHoldReason, RuntimeOrigin, RuntimeTask, Session, SessionKeys, System, WeightToFee, + XcmpQueue, AVERAGE_ON_INITIALIZE_RATIO, BLOCK_PROCESSING_VELOCITY, EXISTENTIAL_DEPOSIT, HOURS, + MAXIMUM_BLOCK_WEIGHT, MICROUNIT, NORMAL_DISPATCH_RATIO, RELAY_CHAIN_SLOT_DURATION_MILLIS, + SLOT_DURATION, UNINCLUDED_SEGMENT_CAPACITY, VERSION, +}; +use xcm_config::{RelayLocation, XcmOriginToTransactDispatchOrigin}; + +parameter_types! { + pub const Version: RuntimeVersion = VERSION; + + // This part is copied from Substrate's `bin/node/runtime/src/lib.rs`. + // The `RuntimeBlockLength` and `RuntimeBlockWeights` exist here because the + // `DeletionWeightLimit` and `DeletionQueueDepth` depend on those to parameterize + // the lazy contract deletion. + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); + pub const SS58Prefix: u16 = 42; +} + +/// The default types are being injected by [`derive_impl`](`frame_support::derive_impl`) from +/// [`ParaChainDefaultConfig`](`struct@frame_system::config_preludes::ParaChainDefaultConfig`), +/// but overridden as needed. +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig)] +impl frame_system::Config for Runtime { + /// The identifier used to distinguish between accounts. + type AccountId = AccountId; + /// The index type for storing how many extrinsics an account has signed. + type Nonce = Nonce; + /// The type for hashing blocks and tries. + type Hash = Hash; + /// The block type. 
+ type Block = Block; + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + type BlockHashCount = BlockHashCount; + /// Runtime version. + type Version = Version; + /// The data to be stored in an account. + type AccountData = pallet_balances::AccountData; + /// The weight of database operations that the runtime can invoke. + type DbWeight = RocksDbWeight; + /// Block & extrinsics weights: base values and limits. + type BlockWeights = RuntimeBlockWeights; + /// The maximum length of a block (in bytes). + type BlockLength = RuntimeBlockLength; + /// This is used as an identifier of the chain. 42 is the generic substrate prefix. + type SS58Prefix = SS58Prefix; + /// The action to take on a Runtime Upgrade + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_timestamp::Config for Runtime { + /// A timestamp: milliseconds since the unix epoch. + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type WeightInfo = (); +} + +impl pallet_authorship::Config for Runtime { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; + type EventHandler = (CollatorSelection,); +} + +parameter_types! { + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = ConstU32<50>; + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = pallet_balances::weights::SubstrateWeight; + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type FreezeIdentifier = (); + type MaxFreezes = ConstU32<0>; +} + +parameter_types! { + /// Relay Chain `TransactionByteFee` / 10 + pub const TransactionByteFee: Balance = 10 * MICROUNIT; +} + +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; + type WeightToFee = WeightToFee; + type LengthToFee = ConstantMultiplier; + type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type OperationalFeeMultiplier = ConstU8<5>; +} + +impl pallet_sudo::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type WeightInfo = (); +} + +parameter_types! 
{ + pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; +} + +impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = (); + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = parachain_info::Pallet; + type OutboundXcmpMessageSource = XcmpQueue; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; + type ReservedDmpWeight = ReservedDmpWeight; + type XcmpMessageHandler = XcmpQueue; + type ReservedXcmpWeight = ReservedXcmpWeight; + type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, + >; +} + +impl parachain_info::Config for Runtime {} + +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); +} + +impl cumulus_pallet_aura_ext::Config for Runtime {} + +impl cumulus_pallet_xcmp_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ChannelInfo = ParachainSystem; + type VersionWrapper = (); + // Enqueue XCMP messages from siblings for later processing. + type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type ControllerOrigin = EnsureRoot; + type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; + type WeightInfo = (); + type PriceForSiblingDelivery = NoPriceForMessageDelivery; +} + +parameter_types! { + pub const Period: u32 = 6 * HOURS; + pub const Offset: u32 = 0; +} + +impl pallet_session::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ValidatorId = ::AccountId; + // we don't have stash and controller, thus we don't need the convert as well. + type ValidatorIdOf = pallet_collator_selection::IdentityCollator; + type ShouldEndSession = pallet_session::PeriodicSessions; + type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionManager = CollatorSelection; + // Essentially just Aura, but let's be pedantic. + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type WeightInfo = (); +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; +} + +parameter_types! 
{ + pub const PotId: PalletId = PalletId(*b"PotStake"); + pub const SessionLength: BlockNumber = 6 * HOURS; + // StakingAdmin pluralistic body. + pub const StakingAdminBodyId: BodyId = BodyId::Defense; +} + +/// We allow root and the StakingAdmin to execute privileged collator selection operations. +pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< + EnsureRoot, + EnsureXcm>, +>; + +impl pallet_collator_selection::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type UpdateOrigin = CollatorSelectionUpdateOrigin; + type PotId = PotId; + type MaxCandidates = ConstU32<100>; + type MinEligibleCollators = ConstU32<4>; + type MaxInvulnerables = ConstU32<20>; + // should be a multiple of session or things will get inconsistent + type KickThreshold = Period; + type ValidatorId = ::AccountId; + type ValidatorIdOf = pallet_collator_selection::IdentityCollator; + type ValidatorRegistration = Session; + type WeightInfo = (); +} + +/// Configure the pallet template in pallets/template. +impl pallet_parachain_template::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = pallet_parachain_template::weights::SubstrateWeight; +} diff --git a/templates/parachain/runtime/src/xcm_config.rs b/templates/parachain/runtime/src/configs/xcm_config.rs similarity index 98% rename from templates/parachain/runtime/src/xcm_config.rs rename to templates/parachain/runtime/src/configs/xcm_config.rs index b1230ba1e5d4..13da2363b053 100644 --- a/templates/parachain/runtime/src/xcm_config.rs +++ b/templates/parachain/runtime/src/configs/xcm_config.rs @@ -1,4 +1,4 @@ -use super::{ +use crate::{ AccountId, AllPalletsWithSystem, Balances, ParachainInfo, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, WeightToFee, XcmpQueue, }; @@ -137,6 +137,9 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); } /// No local origins on this chain are allowed to dispatch XCM sends/executions. 
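(Editor's illustrative aside, not part of the patch: the three new `Hrmp*Handler` associated types in the xcm_config hunk above are wired to `()`. That works because FRAME-style configuration traits conventionally give the unit type a no-op implementation of their handler traits, so `()` means "ignore this event". Below is a minimal standalone model of that pattern; every name in it is invented for illustration and none of it is polkadot-sdk code.)

```rust
// A hook trait, analogous in spirit to the HRMP channel handlers.
trait OnChannelEvent {
    fn on_event(para_id: u32);
}

// The no-op opt-out: `()` implements the hook by doing nothing,
// mirroring how FRAME implements its hooks for the unit type.
impl OnChannelEvent for () {
    fn on_event(_para_id: u32) {}
}

// A config trait in the style of `xcm_executor::Config`.
trait Config {
    type ChannelHandler: OnChannelEvent;
}

struct Runtime;

impl Config for Runtime {
    // Opting out, exactly like `type HrmpChannelAcceptedHandler = ();`.
    type ChannelHandler = ();
}

fn notify<T: Config>(para_id: u32) {
    // Dispatches statically to whatever handler the runtime configured.
    T::ChannelHandler::on_event(para_id);
}

fn main() {
    notify::<Runtime>(2_000); // compiles and does nothing, as intended
}
```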
diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index d229bcfc4bbc..1416fa8d6cf3 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -8,62 +8,37 @@ extern crate alloc; #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +pub mod apis; +mod configs; mod weights; -pub mod xcm_config; #[cfg(not(feature = "std"))] use alloc::{boxed::Box, vec::Vec}; -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; -use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use smallvec::smallvec; -use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{BlakeTwo256, Block as BlockT, IdentifyAccount, Verify}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, MultiSignature, + traits::{BlakeTwo256, IdentifyAccount, Verify}, + MultiSignature, }; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ - construct_runtime, derive_impl, - dispatch::DispatchClass, - genesis_builder_helper::{build_config, create_default_config}, - parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, + construct_runtime, weights::{ - constants::WEIGHT_REF_TIME_PER_SECOND, ConstantMultiplier, Weight, WeightToFeeCoefficient, + constants::WEIGHT_REF_TIME_PER_SECOND, Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, }, - PalletId, }; -use frame_system::{ - limits::{BlockLength, BlockWeights}, - EnsureRoot, -}; -use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; -use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling}; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; -use xcm_config::{RelayLocation, XcmOriginToTransactDispatchOrigin}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; -// Polkadot imports -use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; - -use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; - -use pallet_aura::Authorities; - -// XCM Imports -use xcm::latest::prelude::BodyId; +use weights::ExtrinsicBaseWeight; /// Import the template pallet. pub use pallet_parachain_template; @@ -190,7 +165,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { authoring_version: 1, spec_version: 1, impl_version: 0, - apis: RUNTIME_API_VERSIONS, + apis: apis::RUNTIME_API_VERSIONS, transaction_version: 1, state_version: 1, }; @@ -249,253 +224,6 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } -parameter_types! { - pub const Version: RuntimeVersion = VERSION; - - // This part is copied from Substrate's `bin/node/runtime/src/lib.rs`. - // The `RuntimeBlockLength` and `RuntimeBlockWeights` exist here because the - // `DeletionWeightLimit` and `DeletionQueueDepth` depend on those to parameterize - // the lazy contract deletion. 
- pub RuntimeBlockLength: BlockLength = - BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); - pub const SS58Prefix: u16 = 42; -} - -/// The default types are being injected by [`derive_impl`](`frame_support::derive_impl`) from -/// [`ParaChainDefaultConfig`](`struct@frame_system::config_preludes::ParaChainDefaultConfig`), -/// but overridden as needed. -#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig)] -impl frame_system::Config for Runtime { - /// The identifier used to distinguish between accounts. - type AccountId = AccountId; - /// The index type for storing how many extrinsics an account has signed. - type Nonce = Nonce; - /// The type for hashing blocks and tries. - type Hash = Hash; - /// The block type. - type Block = Block; - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). - type BlockHashCount = BlockHashCount; - /// Runtime version. - type Version = Version; - /// The data to be stored in an account. - type AccountData = pallet_balances::AccountData; - /// The weight of database operations that the runtime can invoke. - type DbWeight = RocksDbWeight; - /// Block & extrinsics weights: base values and limits. - type BlockWeights = RuntimeBlockWeights; - /// The maximum length of a block (in bytes). - type BlockLength = RuntimeBlockLength; - /// This is used as an identifier of the chain. 42 is the generic substrate prefix. - type SS58Prefix = SS58Prefix; - /// The action to take on a Runtime Upgrade - type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; - type WeightInfo = (); -} - -impl pallet_authorship::Config for Runtime { - type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type EventHandler = (CollatorSelection,); -} - -parameter_types! { - pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; -} - -impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<50>; - /// The type for recording an account's balance. - type Balance = Balance; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = pallet_balances::weights::SubstrateWeight; - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; -} - -parameter_types! 
{ - /// Relay Chain `TransactionByteFee` / 10 - pub const TransactionByteFee: Balance = 10 * MICROUNIT; -} - -impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; - type WeightToFee = WeightToFee; - type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; - type OperationalFeeMultiplier = ConstU8<5>; -} - -impl pallet_sudo::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type WeightInfo = (); -} - -parameter_types! { - pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); - pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); - pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; -} - -impl cumulus_pallet_parachain_system::Config for Runtime { - type WeightInfo = (); - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); - type SelfParaId = parachain_info::Pallet; - type OutboundXcmpMessageSource = XcmpQueue; - type DmpQueue = frame_support::traits::EnqueueWithOrigin; - type ReservedDmpWeight = ReservedDmpWeight; - type XcmpMessageHandler = XcmpQueue; - type ReservedXcmpWeight = ReservedXcmpWeight; - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; -} - -impl parachain_info::Config for Runtime {} - -parameter_types! { - pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; -} - -impl pallet_message_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = (); - #[cfg(feature = "runtime-benchmarks")] - type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< - cumulus_primitives_core::AggregateMessageOrigin, - >; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = xcm_builder::ProcessXcmMessage< - AggregateMessageOrigin, - xcm_executor::XcmExecutor, - RuntimeCall, - >; - type Size = u32; - // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: - type QueueChangeHandler = NarrowOriginToSibling; - type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; - type MaxStale = sp_core::ConstU32<8>; - type ServiceWeight = MessageQueueServiceWeight; -} - -impl cumulus_pallet_aura_ext::Config for Runtime {} - -impl cumulus_pallet_xcmp_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ChannelInfo = ParachainSystem; - type VersionWrapper = (); - // Enqueue XCMP messages from siblings for later processing. - type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; - type ControllerOrigin = EnsureRoot; - type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; - type WeightInfo = (); - type PriceForSiblingDelivery = NoPriceForMessageDelivery; -} - -parameter_types! { - pub const Period: u32 = 6 * HOURS; - pub const Offset: u32 = 0; -} - -impl pallet_session::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ValidatorId = ::AccountId; - // we don't have stash and controller, thus we don't need the convert as well. 
- type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ShouldEndSession = pallet_session::PeriodicSessions; - type NextSessionRotation = pallet_session::PeriodicSessions; - type SessionManager = CollatorSelection; - // Essentially just Aura, but let's be pedantic. - type SessionHandler = ::KeyTypeIdProviders; - type Keys = SessionKeys; - type WeightInfo = (); -} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; - type DisabledValidators = (); - type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; -} - -parameter_types! { - pub const PotId: PalletId = PalletId(*b"PotStake"); - pub const SessionLength: BlockNumber = 6 * HOURS; - // StakingAdmin pluralistic body. - pub const StakingAdminBodyId: BodyId = BodyId::Defense; -} - -/// We allow root and the StakingAdmin to execute privileged collator selection operations. -pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, ->; - -impl pallet_collator_selection::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type UpdateOrigin = CollatorSelectionUpdateOrigin; - type PotId = PotId; - type MaxCandidates = ConstU32<100>; - type MinEligibleCollators = ConstU32<4>; - type MaxInvulnerables = ConstU32<20>; - // should be a multiple of session or things will get inconsistent - type KickThreshold = Period; - type ValidatorId = ::AccountId; - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ValidatorRegistration = Session; - type WeightInfo = (); -} - -/// Configure the pallet template in pallets/template. -impl pallet_parachain_template::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_parachain_template::weights::SubstrateWeight; -} - // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime { @@ -545,230 +273,6 @@ mod benches { ); } -impl_runtime_apis! 
{ - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - Authorities::::get().into_inner() - } - } - - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - - fn metadata_at_version(version: u32) -> Option { - Runtime::metadata_at_version(version) - } - - fn metadata_versions() -> alloc::vec::Vec { - Runtime::metadata_versions() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Nonce { - System::account_nonce(account) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { - fn query_info( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - fn query_fee_details( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_fee_details(uxt, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi - for Runtime - { - fn query_call_info( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::RuntimeDispatchInfo { - TransactionPayment::query_call_info(call, len) - } - fn query_call_fee_details( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_call_fee_details(call, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl cumulus_primitives_core::CollectCollationInfo for Runtime { - fn 
collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { - ParachainSystem::collect_collation_info(header) - } - } - - #[cfg(feature = "try-runtime")] - impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { - let weight = Executive::try_runtime_upgrade(checks).unwrap(); - (weight, RuntimeBlockWeights::get().max_block) - } - - fn execute_block( - block: Block, - state_root_check: bool, - signature_check: bool, - select: frame_try_runtime::TryStateSelect, - ) -> Weight { - // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to - // have a backtrace here. - Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() - } - } - - #[cfg(feature = "runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn benchmark_metadata(extra: bool) -> ( - Vec, - Vec, - ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; - use frame_support::traits::StorageInfoTrait; - use frame_system_benchmarking::Pallet as SystemBench; - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - - let mut list = Vec::::new(); - list_benchmarks!(list, extra); - - let storage_info = AllPalletsWithSystem::storage_info(); - (list, storage_info) - } - - fn dispatch_benchmark( - config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{BenchmarkError, Benchmarking, BenchmarkBatch}; - - use frame_system_benchmarking::Pallet as SystemBench; - impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { - ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); - Ok(()) - } - - fn verify_set_code() { - System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); - } - } - - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - impl cumulus_pallet_session_benchmarking::Config for Runtime {} - - use frame_support::traits::WhitelistedStorageKeys; - let whitelist = AllPalletsWithSystem::whitelisted_storage_keys(); - - let mut batches = Vec::::new(); - let params = (&config, &whitelist); - add_benchmarks!(params, batches); - - if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } - Ok(batches) - } - } - - impl sp_genesis_builder::GenesisBuilder for Runtime { - fn create_default_config() -> Vec { - create_default_config::() - } - - fn build_config(config: Vec) -> sp_genesis_builder::Result { - build_config::(config) - } - } -} - cumulus_pallet_parachain_system::register_validate_block! 
{ Runtime = Runtime, BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::<Runtime, Executive>, diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml index a0bb5c27ed3b..1ddd1cb3a7b4 100644 --- a/templates/solochain/node/Cargo.toml +++ b/templates/solochain/node/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { version = "4.5.3", features = ["derive"] } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } serde_json = { workspace = true, default-features = true } jsonrpsee = { version = "0.22", features = ["server"] } diff --git a/templates/solochain/pallets/template/Cargo.toml b/templates/solochain/pallets/template/Cargo.toml index bd2347151989..24519f1d22e0 100644 --- a/templates/solochain/pallets/template/Cargo.toml +++ b/templates/solochain/pallets/template/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml index 11bdc5907733..bfe534615a54 100644 --- a/templates/solochain/runtime/Cargo.toml +++ b/templates/solochain/runtime/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", "serde", ] } @@ -61,7 +61,7 @@ sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", sp-version = { path = "../../../substrate/primitives/version", default-features = false, features = [ "serde", ] } -sp-genesis-builder = { default-features = false, path = "../../../substrate/primitives/genesis-builder" } +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } # RPC related frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false }
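(Editor's closing aside, not part of the patch: the dependency bumps that recur throughout this diff, e.g. `futures = "0.3"` to `"0.3.30"` and `tokio = "1.22.0"` to `"1.37"`, are caret requirements, so each bump only raises the minimum version Cargo will accept within the same compatibility range. A small sketch of that semantics, assuming the third-party `semver` crate; the version numbers are taken from the hunks above.)

```rust
use semver::{Version, VersionReq};

fn main() {
    // `futures = "0.3"` and `futures = "0.3.30"` are both caret requirements:
    // any 0.3.x is in range, but the second raises the accepted minimum.
    let loose = VersionReq::parse("0.3").unwrap();
    let pinned = VersionReq::parse("0.3.30").unwrap();

    let old = Version::parse("0.3.21").unwrap();
    let new = Version::parse("0.3.30").unwrap();

    assert!(loose.matches(&old) && loose.matches(&new)); // both satisfy "0.3"
    assert!(!pinned.matches(&old)); // 0.3.21 no longer satisfies "0.3.30"
    assert!(pinned.matches(&new));
}
```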