diff --git a/.github/workflows/cd.yaml b/.github/workflows/cd.yaml
new file mode 100644
index 0000000000..4f5c88126f
--- /dev/null
+++ b/.github/workflows/cd.yaml
@@ -0,0 +1,75 @@
+name: Continuous Deployment
+
+on:
+  push:
+    tags:
+      - "v*.*.*"
+
+jobs:
+  linux-release:
+    name: Linux Release
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Build
+        run: cargo build --release
+      - name: Archive
+        working-directory: target/release
+        run: tar -czvf grin-${{ github.ref_name }}-linux-x86_64.tar.gz grin
+      - name: Create Checksum
+        working-directory: target/release
+        run: openssl sha256 grin-${{ github.ref_name }}-linux-x86_64.tar.gz > grin-${{ github.ref_name }}-linux-x86_64-sha256sum.txt
+      - name: Release
+        uses: softprops/action-gh-release@v1
+        with:
+          generate_release_notes: true
+          files: |
+            target/release/grin-${{ github.ref_name }}-linux-x86_64.tar.gz
+            target/release/grin-${{ github.ref_name }}-linux-x86_64-sha256sum.txt
+
+  macos-release:
+    name: macOS Release
+    runs-on: macos-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Build
+        run: cargo build --release
+      - name: Archive
+        working-directory: target/release
+        run: tar -czvf grin-${{ github.ref_name }}-macos-x86_64.tar.gz grin
+      - name: Create Checksum
+        working-directory: target/release
+        run: openssl sha256 grin-${{ github.ref_name }}-macos-x86_64.tar.gz > grin-${{ github.ref_name }}-macos-x86_64-sha256sum.txt
+      - name: Release
+        uses: softprops/action-gh-release@v1
+        with:
+          files: |
+            target/release/grin-${{ github.ref_name }}-macos-x86_64.tar.gz
+            target/release/grin-${{ github.ref_name }}-macos-x86_64-sha256sum.txt
+
+  windows-release:
+    name: Windows Release
+    runs-on: windows-2019
+    env:
+      ROARING_ARCH: x86-64-v2
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Build
+        run: cargo build --release
+      - name: Archive
+        uses: vimtor/action-zip@v1
+        with:
+          files: target/release/grin.exe
+          dest: target/release/grin-${{ github.ref_name }}-win-x86_64.zip
+      - name: Create Checksum
+        working-directory: target/release
+        shell: pwsh
+        run: get-filehash -algorithm sha256 grin-${{ github.ref_name }}-win-x86_64.zip | Format-List | Out-String | ForEach-Object { $_.Trim() } > grin-${{ github.ref_name }}-win-x86_64-sha256sum.txt
+      - name: Release
+        uses: softprops/action-gh-release@v1
+        with:
+          files: |
+            target/release/grin-${{ github.ref_name }}-win-x86_64.zip
+            target/release/grin-${{ github.ref_name }}-win-x86_64-sha256sum.txt
\ No newline at end of file
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
new file mode 100644
index 0000000000..3e64c03f08
--- /dev/null
+++ b/.github/workflows/ci.yaml
@@ -0,0 +1,35 @@
+name: Continuous Integration
+on: [push, pull_request]
+
+jobs:
+  linux-tests:
+    name: Linux Tests
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        job_args: [servers, chain, core, keychain, pool, p2p, src, api, util, store]
+    steps:
+      - uses: actions/checkout@v3
+      - name: Test ${{ matrix.job_args }}
+        working-directory: ${{ matrix.job_args }}
+        run: cargo test --release
+
+  macos-tests:
+    name: macOS Tests
+    runs-on: macos-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Tests
+        run: cargo test --release --all
+
+  windows-tests:
+    name: Windows Tests
+    runs-on: windows-2019
+    env:
+      ROARING_ARCH: x86-64-v2
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Tests
+        run: cargo test --release --all
\ No newline at end of file
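Reviewer note: each release job in cd.yaml above publishes the archive together with a plain-text digest produced by `openssl sha256` (or `Get-FileHash` on Windows), so downloaders can verify artifacts with stock tooling. A minimal verification sketch, assuming the Linux artifact names the workflow would generate for a hypothetical `v5.3.2` tag:

```bash
# Recompute the digest of the downloaded archive...
openssl sha256 grin-v5.3.2-linux-x86_64.tar.gz
# ...and compare it against the published checksum file from the release.
cat grin-v5.3.2-linux-x86_64-sha256sum.txt
```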
diff --git a/.hooks/pre-commit b/.hooks/pre-commit
index 8056d6cb81..7c029edeba 100755
--- a/.hooks/pre-commit
+++ b/.hooks/pre-commit
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash

-# Copyright 2018 The Grin Developers
+# Copyright 2021 The Grin Developers
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 51d9a26727..9287f5a792 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -28,13 +28,13 @@ The development team will be happy to help and guide you with any of these point
 When you are starting to contribute to grin, we really would appreciate if you come by the gitter chat channels.

-In case of problems with trying out grin, before starting to contribute, there's the [Support chat](https://gitter.im/grin_community/support). Write there about what you've done, what you want to do, and maybe paste logs through a text paste webservice.
+In case of problems with trying out grin, before starting to contribute, there's the [grincoin#support](https://keybase.io/team/grincoin) channel on Keybase. Write there about what you've done, what you want to do, and maybe paste logs through a text paste webservice.

-* Please [join the grin Lobby](https://gitter.im/grin_community/Lobby) to get a feeling for the community.
-* And [see the developers chat](https://gitter.im/grin_community/dev) if you have questions about source code files.
+* Please [join grincoin#general on Keybase](https://keybase.io/team/grincoin) to get a feeling for the community.
+* And see the developers chat channel [grincoin#dev on Keybase](https://keybase.io/team/grincoin) if you have questions about source code files.

 If you explain what you're looking at and what you want to do, we'll try to help you along the way.

-* Also see `docs/*.md` and the folder structure explanations, and [the wiki](https://github.com/mimblewimble/docs/wiki).
-* Further information and discussions are in the [Forum](https://forum.grin.mw), the [website](https://grin.mw), the [mailing list](https://lists.launchpad.net/mimblewimble/) and news channels like the [@grincouncil](https://twitter.com/grincouncil) and a (mostly unfiltered!) Twitter bot that collects headlines, mailing list posts, and reddit posts related to Mimblewimble/Grin: [@grinmw](https://twitter.com/grinmw)
+* See `docs/*.md` and the folder structure explanations, [the wiki](https://github.com/mimblewimble/docs/wiki) and the official [Grin documentation](https://docs.grin.mw/).
+* Further information and discussions are in the [Forum](https://forum.grin.mw), the [website](https://grin.mw), the [mailing list](https://lists.launchpad.net/mimblewimble/) and news channels like [Reddit/grincoin](https://www.reddit.com/r/grincoin/) and a (mostly unfiltered!) Twitter bot that collects headlines, mailing list posts, and reddit posts related to Mimblewimble/Grin: [@grinmw](https://twitter.com/grinmw)

 ## Testing
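Reviewer note: the `## Testing` section above pairs with the new ci.yaml. For a local run, the Linux job matrix amounts to running each crate's test suite from its own directory — a sketch assuming the repository root as working directory, mirroring the workflow's `working-directory` per matrix entry:

```bash
# Local equivalent of the ci.yaml Linux test matrix.
for crate in servers chain core keychain pool p2p src api util store; do
  (cd "$crate" && cargo test --release)
done
```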
diff --git a/Cargo.lock b/Cargo.lock
index 0c9925f6b4..e3a55e3640 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4,9 +4,9 @@ version = 3

 [[package]]
 name = "addr2line"
-version = "0.16.0"
+version = "0.22.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd"
+checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678"
 dependencies = [
  "gimli",
 ]
@@ -19,74 +19,74 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"

 [[package]]
 name = "aead"
-version = "0.3.2"
+version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331"
+checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0"
 dependencies = [
- "generic-array 0.14.4",
+ "crypto-common",
+ "generic-array 0.14.7",
 ]

 [[package]]
 name = "aes"
-version = "0.5.0"
+version = "0.8.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd2bc6d3f370b5666245ff421e231cba4353df936e26986d2918e61a8fd6aef6"
+checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"
 dependencies = [
- "aes-soft",
- "aesni",
- "block-cipher",
+ "cfg-if 1.0.0",
+ "cipher",
+ "cpufeatures",
 ]

 [[package]]
 name = "aes-gcm"
-version = "0.7.0"
+version = "0.10.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0301c9e9c443494d970a07885e8cf3e587bae8356a1d5abd0999068413f7205f"
+checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1"
 dependencies = [
  "aead",
  "aes",
- "block-cipher",
+ "cipher",
+ "ctr",
  "ghash",
  "subtle 2.4.1",
 ]

 [[package]]
-name = "aes-soft"
-version = "0.5.0"
+name = "ahash"
+version = "0.8.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "63dd91889c49327ad7ef3b500fd1109dbd3c509a03db0d4a9ce413b79f575cb6"
+checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
 dependencies = [
- "block-cipher",
- "byteorder",
- "opaque-debug 0.3.0",
+ "cfg-if 1.0.0",
+ "getrandom 0.2.15",
+ "once_cell",
+ "version_check",
+ "zerocopy",
 ]

 [[package]]
-name = "aesni"
-version = "0.8.0"
+name = "aho-corasick"
+version = "1.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a6fe808308bb07d393e2ea47780043ec47683fcf19cf5efc8ca51c50cc8c68a"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
 dependencies = [
- "block-cipher",
- "opaque-debug 0.3.0",
+ "memchr",
 ]

 [[package]]
-name = "ahash"
-version = "0.4.7"
+name = "android-tzdata"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e"
-dependencies = [
- "const-random",
-]
+checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"

 [[package]]
-name = "aho-corasick"
-version = "0.7.18"
+name = "android_system_properties"
+version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
 dependencies = [
- "memchr",
+ "libc",
 ]

 [[package]]
@@ -98,11 +98,20 @@ dependencies = [
  "winapi 0.3.9",
 ]

+[[package]]
+name = "ansi_term"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
+dependencies = [
+ "winapi 0.3.9",
+]
+
 [[package]]
 name = "anyhow"
-version = "1.0.42"
+version = "1.0.86"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "595d3cfa7a60d4555cb5067b99f07142a08ea778de5cf993f7b75c7d8fabc486"
+checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"

 [[package]]
 name = "arc-swap"
@@ -110,27 +119,11 @@ version = "0.4.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dabe5a181f83789739c194cbe5a897dde195078fac08568d09221fd6137a7ba8"

-[[package]]
-name = "array-macro"
-version = "1.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06e97b4e522f9e55523001238ac59d13a8603af57f69980de5d8de4bbbe8ada6"
-
 [[package]]
 name = "arrayref"
-version = "0.3.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
-
-[[package]]
-name = "arrayvec"
-version = "0.3.25"
+version = "0.3.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06f59fe10306bb78facd90d28c2038ad23ffaaefa85bac43c8a434cde383334f"
-dependencies = [
- "nodrop",
- "odds",
-]
+checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545"

 [[package]]
 name = "arrayvec"
@@ -147,6 +140,12 @@ version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"

+[[package]]
+name = "arrayvec"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
+
 [[package]]
 name = "asn1_der"
 version = "0.6.3"
@@ -162,110 +161,141 @@ version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502"
 dependencies = [
- "quote 1.0.9",
- "syn 1.0.74",
+ "quote 1.0.36",
+ "syn 1.0.109",
 ]

 [[package]]
 name = "async-channel"
-version = "1.6.1"
+version = "1.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319"
+checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35"
 dependencies = [
  "concurrent-queue",
- "event-listener",
+ "event-listener 2.5.3",
  "futures-core",
 ]

+[[package]]
+name = "async-channel"
+version = "2.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a"
+dependencies = [
+ "concurrent-queue",
+ "event-listener-strategy",
+ "futures-core",
+ "pin-project-lite 0.2.14",
+]
+
 [[package]]
 name = "async-executor"
-version = "1.4.1"
+version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965"
+checksum = "c8828ec6e544c02b0d6691d21ed9f9218d0384a82542855073c2a3f58304aaf0"
 dependencies = [
  "async-task",
  "concurrent-queue",
- "fastrand",
- "futures-lite",
- "once_cell",
+ "fastrand 2.1.0",
+ "futures-lite 2.3.0",
  "slab",
 ]

 [[package]]
 name = "async-global-executor"
-version = "2.0.2"
+version = "2.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6"
+checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c"
 dependencies = [
- "async-channel",
+ "async-channel 2.3.1",
  "async-executor",
- "async-io",
- "async-mutex",
+ "async-io 2.3.3",
+ "async-lock 3.4.0",
  "blocking",
- "futures-lite",
- "num_cpus",
+ "futures-lite 2.3.0",
  "once_cell",
 ]

 [[package]]
 name = "async-io"
-version = "1.6.0"
+version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a811e6a479f2439f0c04038796b5cfb3d2ad56c230e0f2d3f7b04d68cfee607b"
+checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af"
 dependencies = [
+ "async-lock 2.8.0",
+ "autocfg 1.3.0",
+ "cfg-if 1.0.0",
  "concurrent-queue",
- "futures-lite",
- "libc",
+ "futures-lite 1.13.0",
  "log",
- "once_cell",
  "parking",
- "polling",
+ "polling 2.8.0",
+ "rustix 0.37.27",
  "slab",
- "socket2 0.4.1",
+ "socket2 0.4.10",
  "waker-fn",
- "winapi 0.3.9",
+]
+
+[[package]]
+name = "async-io"
+version = "2.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964"
+dependencies = [
+ "async-lock 3.4.0",
+ "cfg-if 1.0.0",
+ "concurrent-queue",
+ "futures-io",
+ "futures-lite 2.3.0",
+ "parking",
+ "polling 3.7.2",
+ "rustix 0.38.34",
+ "slab",
+ "tracing",
+ "windows-sys 0.52.0",
 ]

 [[package]]
 name = "async-lock"
-version = "2.4.0"
+version = "2.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e6a8ea61bf9947a1007c5cada31e647dbc77b103c679858150003ba697ea798b"
+checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b"
 dependencies = [
- "event-listener",
+ "event-listener 2.5.3",
 ]

 [[package]]
-name = "async-mutex"
-version = "1.4.0"
+name = "async-lock"
+version = "3.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e"
+checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18"
 dependencies = [
- "event-listener",
+ "event-listener 5.3.1",
+ "event-listener-strategy",
+ "pin-project-lite 0.2.14",
 ]

 [[package]]
 name = "async-std"
-version = "1.9.0"
+version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9f06685bad74e0570f5213741bea82158279a4103d988e57bfada11ad230341"
+checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d"
 dependencies = [
- "async-channel",
+ "async-channel 1.9.0",
  "async-global-executor",
- "async-io",
- "async-lock",
- "crossbeam-utils 0.8.5",
+ "async-io 1.13.0",
+ "async-lock 2.8.0",
+ "crossbeam-utils",
  "futures-channel",
  "futures-core",
  "futures-io",
- "futures-lite",
+ "futures-lite 1.13.0",
  "gloo-timers",
  "kv-log-macro",
  "log",
  "memchr",
- "num_cpus",
  "once_cell",
- "pin-project-lite 0.2.7",
+ "pin-project-lite 0.2.14",
  "pin-utils",
  "slab",
  "wasm-bindgen-futures",
@@ -273,37 +303,40 @@ dependencies = [

 [[package]]
 name = "async-task"
-version = "4.0.3"
+version = "4.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0"
+checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de"

 [[package]]
 name = "asynchronous-codec"
-version = "0.6.0"
+version = "0.6.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0de5164e5edbf51c45fb8c2d9664ae1c095cce1b265ecf7569093c0d66ef690"
+checksum = "4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568"
 dependencies = [
- "bytes 1.0.1",
+ "bytes 1.6.0",
  "futures-sink",
  "futures-util",
  "memchr",
- "pin-project-lite 0.2.7",
+ "pin-project-lite 0.2.14",
 ]

 [[package]]
 name = "atomic"
-version = "0.5.0"
+version = "0.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281"
-dependencies = [
- "autocfg 1.0.1",
-]
+checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba"

 [[package]]
 name = "atomic-waker"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
+
+[[package]]
+name = "atomic_float"
 version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a"
+checksum = "3c4b08ed8a30ff7320117c190eb4d73d47f0ac0c930ab853b8224cef7cd9a5e7"

 [[package]]
 name = "atty"
@@ -311,28 +344,31 @@ version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
 dependencies = [
- "hermit-abi",
+ "hermit-abi 0.1.19",
  "libc",
  "winapi 0.3.9",
 ]

 [[package]]
 name = "autocfg"
-version = "0.1.7"
+version = "0.1.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2"
+checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78"
+dependencies = [
+ "autocfg 1.3.0",
+]

 [[package]]
 name = "autocfg"
-version = "1.0.1"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"

 [[package]]
 name = "backtrace"
-version = "0.3.61"
+version = "0.3.73"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7a905d892734eea339e896738c14b9afce22b5318f64b951e70bf3844419b01"
+checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a"
 dependencies = [
  "addr2line",
  "cc",
@@ -357,15 +393,15 @@ checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff"

 [[package]]
 name = "base64"
-version = "0.13.0"
+version = "0.13.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
+checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"

 [[package]]
 name = "base64ct"
-version = "1.0.0"
+version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0d27fb6b6f1e43147af148af49d49329413ba781aa0d5e10979831c210173b5"
+checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"

 [[package]]
 name = "bit-vec"
@@ -381,19 +417,23 @@ checksum = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5"

 [[package]]
 name = "bitflags"
-version = "1.2.1"
+version = "1.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "bitflags"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
"b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "blake2" -version = "0.9.1" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a5720225ef5daecf08657f23791354e1685a8c91a4c60c7f3d3b2892f978f4" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", - "opaque-debug 0.3.0", + "digest 0.10.7", ] [[package]] @@ -435,16 +475,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.7", ] [[package]] -name = "block-cipher" -version = "0.8.0" +name = "block-buffer" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f337a3e6da609650eb74e02bc9fac7b735049f7623ab12f2e4c719316fcc7e80" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.7", ] [[package]] @@ -458,16 +498,15 @@ dependencies = [ [[package]] name = "blocking" -version = "1.0.2" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e170dbede1f740736619b776d7251cb1b9095c435c34d8ca9f57fcd2f335e9" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ - "async-channel", + "async-channel 2.3.1", "async-task", - "atomic-waker", - "fastrand", - "futures-lite", - "once_cell", + "futures-io", + "futures-lite 2.3.0", + "piper", ] [[package]] @@ -488,9 +527,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.7.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byte-tools" @@ -500,9 +539,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" @@ -522,15 +561,9 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "bytes" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" - -[[package]] -name = "cache-padded" -version = "1.1.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "cargo-lock" @@ -538,7 +571,7 @@ version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8504b63dd1249fd1745b7b4ef9b6f7b107ddeb3c95370043c7dbcc38653a2679" dependencies = [ - "semver", + "semver 0.9.0", "serde", "toml", "url", @@ -546,11 +579,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.69" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2" +checksum = "779e6b7d17797c0b42023d417228c02889300190e700cb074c3438d9c541d332" dependencies = [ "jobserver", + "libc", + "once_cell", ] [[package]] @@ -565,53 +600,72 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chacha20" -version = "0.5.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "244fbce0d47e97e8ef2f63b81d5e05882cb518c68531eb33194990d7b7e85845" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ - "stream-cipher", - "zeroize", + "cfg-if 1.0.0", + "cipher", + "cpufeatures", ] [[package]] name = "chacha20poly1305" -version = "0.6.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bf18d374d66df0c05cdddd528a7db98f78c28e2519b120855c4f84c5027b1f5" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ "aead", "chacha20", + "cipher", "poly1305", - "stream-cipher", "zeroize", ] [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ - "libc", - "num-integer", - "num-traits 0.2.14", + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits 0.2.19", "serde", - "time", - "winapi 0.3.9", + "wasm-bindgen", + "windows-targets 0.52.5", +] + +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", + "zeroize", ] [[package]] name = "clap" -version = "2.33.3" +version = "2.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ - "ansi_term", + "ansi_term 0.12.1", "atty", - "bitflags 1.2.1", - "strsim 0.8.0", + "bitflags 1.3.2", + "strsim", "textwrap", "unicode-width", "vec_map", @@ -624,38 +678,16 @@ version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", ] [[package]] name = "concurrent-queue" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" -dependencies = [ - "cache-padded", -] - -[[package]] -name = "const-random" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f590d95d011aa80b063ffe3253422ed5aa462af4e9867d43ce8337562bac77c4" -dependencies = [ - "const-random-macro", - "proc-macro-hack", -] - -[[package]] -name = "const-random-macro" -version = "0.1.13" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"615f6e27d000a2bffbc7f2f6a8669179378fa27ee4d0a509e985dfc0a7defb40" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ - "getrandom 0.2.3", - "lazy_static", - "proc-macro-hack", - "tiny-keccak", + "crossbeam-utils", ] [[package]] @@ -670,7 +702,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" dependencies = [ - "core-foundation-sys", + "core-foundation-sys 0.7.0", "libc", ] @@ -680,35 +712,35 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +[[package]] +name = "core-foundation-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" + [[package]] name = "cpufeatures" -version = "0.1.5" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c99696f6c9dd7f35d486b9d04d7e6e202aa3e8c40d553f2fdf5e7e0c6a71ef" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] -[[package]] -name = "cpuid-bool" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" - [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "croaring" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7266f0a7275b00ce4c4f4753e8c31afdefe93828101ece83a06e2ddab1dd1010" +checksum = "611eaefca84c93e431ad82dfb848f6e05a99e25148384f45a3852b0fbe1c8086" dependencies = [ "byteorder", "croaring-sys", @@ -716,77 +748,46 @@ dependencies = [ [[package]] name = "croaring-sys" -version = "1.1.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e47112498c394a7067949ebc07ef429b7384a413cf0efcf675846a47bcd307fb" +checksum = "3e5fed89265a702f0085844237a7ebbadf8a7c42de6304fddca30a5013f9aecb" dependencies = [ "cc", ] [[package]] name = "crossbeam-channel" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" -dependencies = [ - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.1" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.5", + "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils 0.8.5", + "crossbeam-utils", ] [[package]] name = 
"crossbeam-epoch" -version = "0.9.5" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.5", - "lazy_static", - "memoffset", - "scopeguard", + "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.7.2" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg 1.0.1", - "cfg-if 0.1.10", - "lazy_static", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" -dependencies = [ - "cfg-if 1.0.0", - "lazy_static", -] +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crunchy" @@ -795,23 +796,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] -name = "crypto-mac" -version = "0.7.0" +name = "crypto-common" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.12.4", - "subtle 1.0.0", + "generic-array 0.14.7", + "rand_core 0.6.4", + "typenum", ] [[package]] name = "crypto-mac" -version = "0.8.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.1", + "generic-array 0.12.4", + "subtle 1.0.0", ] [[package]] @@ -820,7 +822,7 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.7", "subtle 2.4.1", ] @@ -844,35 +846,43 @@ dependencies = [ [[package]] name = "ctor" -version = "0.1.20" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e98e2ad1a782e33928b96fc3948e7c355e5af34ba4de7670fe8bac2a3b2006d" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ - "quote 1.0.9", - "syn 1.0.74", + "quote 1.0.36", + "syn 1.0.109", +] + +[[package]] +name = "ctr" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher", ] [[package]] name = "ctrlc" -version = "3.1.9" +version = "3.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "232295399409a8b7ae41276757b5a1cc21032848d42bff2352261f958b3ca29a" +checksum = "672465ae37dc1bc6380a6547a8883d5dd397b0f1faaad4f265726cc7042a5345" dependencies = [ "nix", - "winapi 0.3.9", + "windows-sys 0.52.0", ] [[package]] name = "cursive" -version = "0.15.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"34ceb8704199492858fea2cedf8d9adde0db27755bc4313d771923087f30fbc6" +checksum = "5438eb16bdd8af51b31e74764fef5d0a9260227a5ec82ba75c9d11ce46595839" dependencies = [ "ahash", - "cfg-if 0.1.10", - "crossbeam-channel 0.4.4", + "cfg-if 1.0.0", + "crossbeam-channel", "cursive_core", - "enumset", "lazy_static", "libc", "log", @@ -886,26 +896,33 @@ dependencies = [ [[package]] name = "cursive_core" -version = "0.1.3" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "158e802c91192df6ecd1b9a9f2cbb711fab744ed5ca154f8b47339b83dc5cfb2" +checksum = "4db3b58161228d0dcb45c7968c5e74c3f03ad39e8983e58ad7d57061aa2cd94d" dependencies = [ "ahash", - "chrono", - "crossbeam-channel 0.4.4", + "crossbeam-channel", "enum-map", "enumset", "lazy_static", - "libc", "log", - "num 0.3.1", + "num 0.4.3", "owning_ref", - "signal-hook", + "time", "unicode-segmentation", "unicode-width", "xi-unicode", ] +[[package]] +name = "cursive_table_view" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8935dd87d19c54b7506b245bc988a7b4e65b1058e1d0d64c0ad9b3188e48060" +dependencies = [ + "cursive_core", +] + [[package]] name = "curve25519-dalek" version = "3.2.0" @@ -919,11 +936,37 @@ dependencies = [ "zeroize", ] +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "curve25519-dalek-derive", + "fiat-crypto", + "rustc_version", + "subtle 2.4.1", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.68", +] + [[package]] name = "darling" -version = "0.13.0" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "757c0ded2af11d8e739c4daea1ac623dd1624b06c844cf3f5a39f1bdbd99bb12" +checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" dependencies = [ "darling_core", "darling_macro", @@ -931,34 +974,42 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.13.0" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c34d8efb62d0c2d7f60ece80f75e5c63c1588ba68032740494b0b9a996466e3" +checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.28", - "quote 1.0.9", - "strsim 0.10.0", - "syn 1.0.74", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.68", ] [[package]] name = "darling_macro" -version = "0.13.0" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade7bff147130fe5e6d39f089c6bd49ec0250f35d70b2eebf72afdfc919f15cc" +checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ "darling_core", - "quote 1.0.9", - "syn 1.0.74", + "quote 1.0.36", + "syn 2.0.68", ] [[package]] name = "data-encoding" -version = "2.3.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" + +[[package]] +name = "deranged" +version = "0.3.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", +] [[package]] name = "difference" @@ -981,7 +1032,18 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.7", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer 0.10.4", + "crypto-common", + "subtle 2.4.1", ] [[package]] @@ -1007,12 +1069,12 @@ dependencies = [ [[package]] name = "dirs-sys" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" +checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" dependencies = [ "libc", - "redox_users 0.4.0", + "redox_users 0.4.5", "winapi 0.3.9", ] @@ -1022,12 +1084,6 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" -[[package]] -name = "dtoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" - [[package]] name = "easy-jsonrpc-mw" version = "0.5.4" @@ -1055,9 +1111,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.2.0" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" dependencies = [ "signature", ] @@ -1068,39 +1124,38 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "curve25519-dalek", + "curve25519-dalek 3.2.0", "ed25519", "rand 0.7.3", "serde", - "sha2 0.9.5", + "sha2 0.9.9", "zeroize", ] [[package]] name = "either" -version = "1.6.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "enum-map" -version = "0.6.4" +version = "2.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4187999839f4ae8be35cf185d1381aa8dc32d2f5951349cc37ae49ebc4781855" +checksum = "6866f3bfdf8207509a033af1a75a7b08abda06bbaaeae6669323fd5a097df2e9" dependencies = [ - "array-macro", "enum-map-derive", ] [[package]] name = "enum-map-derive" -version = "0.4.6" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5c450cf304c9e18d45db562025a14fb1ca0f5c769b6f609309f81d4c31de455" +checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" dependencies = [ - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.68", ] [[package]] @@ -1114,23 +1169,23 @@ dependencies = [ [[package]] name = "enumset" -version = "1.0.7" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "7e76129da36102af021b8e5000dab2c1c30dbef85c1e482beeff8da5dde0e0b0" +checksum = "226c0da7462c13fb57e5cc9e0dc8f0635e7d27f276a3a7fd30054647f669007d" dependencies = [ "enumset_derive", ] [[package]] name = "enumset_derive" -version = "0.5.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6451128aa6655d880755345d085494cf7561a6bee7c8dc821e5d77e6d267ecd4" +checksum = "e08b6c6ab82d70f08844964ba10c7babb716de2ecaeab9be5717918a5177d3af" dependencies = [ "darling", - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.68", ] [[package]] @@ -1146,32 +1201,47 @@ dependencies = [ "termcolor", ] +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "event-listener" -version = "2.5.1" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] -name = "failure" -version = "0.1.8" +name = "event-listener" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" dependencies = [ - "backtrace", - "failure_derive", + "concurrent-queue", + "parking", + "pin-project-lite 0.2.14", ] [[package]] -name = "failure_derive" -version = "0.1.8" +name = "event-listener-strategy" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", - "synstructure", + "event-listener 5.3.1", + "pin-project-lite 0.2.14", ] [[package]] @@ -1182,23 +1252,35 @@ checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" [[package]] name = "fastrand" -version = "1.5.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b394ed3d285a429378d3b384b9eb1285267e7df4b166df24b7a6939a04dc392e" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] +[[package]] +name = "fastrand" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + [[package]] name = "filetime" -version = "0.2.14" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d34cfa13a63ae058bfa601fe9e313bbdb3746427c1459185464ce0fcf62e1e8" +checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 
0.2.9", - "winapi 0.3.9", + "redox_syscall 0.4.1", + "windows-sys 0.52.0", ] [[package]] @@ -1209,13 +1291,11 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" [[package]] name = "flate2" -version = "1.0.20" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0" +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" dependencies = [ - "cfg-if 1.0.0", "crc32fast", - "libc", "miniz_oxide", ] @@ -1227,11 +1307,10 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ - "matches", "percent-encoding", ] @@ -1257,7 +1336,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", "fuchsia-zircon-sys", ] @@ -1275,9 +1354,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -1290,9 +1369,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -1300,15 +1379,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -1318,59 +1397,72 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ - "fastrand", + "fastrand 1.9.0", "futures-core", "futures-io", "memchr", "parking", - "pin-project-lite 
0.2.7", + "pin-project-lite 0.2.14", "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "fastrand 2.1.0", + "futures-core", + "futures-io", + "parking", + "pin-project-lite 0.2.14", +] + [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.68", ] [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -1379,7 +1471,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.14", "pin-utils", "slab", ] @@ -1401,9 +1493,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.4" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -1422,38 +1514,38 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] name = "ghash" -version = "0.3.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ - "opaque-debug 0.3.0", + "opaque-debug 0.3.1", "polyval", ] [[package]] name = "gimli" -version = "0.25.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "git2" -version = "0.13.20" +version = "0.13.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9831e983241f8c5591ed53f17d874833e2fa82cac2625f3888c50cbfe136cba" +checksum = "f29229cc1b24c0e6062f6e742aa3e256492a5323365e5ed3413599f8a5eff7d6" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", "libc", "libgit2-sys", "log", @@ -1462,56 +1554,24 @@ dependencies = [ [[package]] name = "gloo-timers" -version = "0.2.1" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47204a46aaff920a1ea58b11d03dec6f704287d27561724a4631e450654a891f" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" dependencies = [ "futures-channel", "futures-core", "js-sys", "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "grin" -version = "4.4.2" -dependencies = [ - "blake2-rfc", - "built", - "chrono", - "clap", - "ctrlc", - "cursive", - "failure", - "failure_derive", - "futures 0.3.21", - "grin_api", - "grin_chain", - "grin_config", - "grin_core", - "grin_keychain", - "grin_p2p", - "grin_servers", - "grin_store", - "grin_util", - "humansize", - "log", - "serde", - "serde_json", - "term", ] [[package]] name = "grin_api" -version = "4.4.2" +version = "5.3.2" dependencies = [ "bytes 0.5.6", "chrono", "easy-jsonrpc-mw", - "failure", - "failure_derive", - "futures 0.3.21", + "futures 0.3.30", "grin_chain", "grin_core", "grin_p2p", @@ -1526,11 +1586,12 @@ dependencies = [ "lazy_static", "log", "regex", - "ring", + "ring 0.16.20", "rustls 0.17.0", "serde", "serde_derive", "serde_json", + "thiserror", "tokio 0.2.25", "tokio-rustls 0.13.1", "url", @@ -1538,17 +1599,15 @@ dependencies = [ [[package]] name = "grin_chain" -version = "4.4.2" +version = "5.3.2" dependencies = [ "bit-vec", - "bitflags 1.2.1", + "bitflags 1.3.2", "byteorder", "chrono", "croaring", "enum_primitive", "env_logger", - "failure", - "failure_derive", "grin_core", "grin_keychain", "grin_store", @@ -1559,15 +1618,14 @@ dependencies = [ "rand 0.6.5", "serde", "serde_derive", + "thiserror", ] [[package]] name = "grin_config" -version = "4.4.2" +version = "5.3.2" dependencies = [ "dirs 2.0.2", - "failure", - "failure_derive", "grin_core", "grin_p2p", "grin_servers", @@ -1576,20 +1634,20 @@ dependencies = [ "rand 0.6.5", "serde", "serde_derive", + "thiserror", "toml", ] [[package]] name = "grin_core" -version = "4.4.2" +version = "5.3.2" dependencies = [ "blake2-rfc", "byteorder", + "bytes 0.5.6", "chrono", "croaring", "enum_primitive", - "failure", - "failure_derive", "grin_keychain", "grin_util", "lazy_static", @@ -1602,18 +1660,17 @@ dependencies = [ "serde_derive", "serde_json", "siphasher", + "thiserror", "zeroize", ] [[package]] name = "grin_keychain" -version = "4.4.2" +version = "5.3.2" dependencies = [ "blake2-rfc", "byteorder", "digest 0.9.0", - "failure", - "failure_derive", "grin_util", "hmac 0.11.0", "lazy_static", @@ -1624,22 +1681,22 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "sha2 0.9.5", + "sha2 0.9.9", + "thiserror", "zeroize", ] [[package]] name = "grin_p2p" -version = "4.4.2" +version = "5.3.2" dependencies = [ "async-std", - "bitflags 1.2.1", + "bitflags 1.3.2", + "bytes 0.5.6", "chrono", "ed25519-dalek", "enum_primitive", - "failure", - "failure_derive", - "futures 0.3.21", + "futures 0.3.30", "grin_chain", "grin_core", "grin_pool", @@ -1658,18 +1715,17 
@@ dependencies = [ "serde_json", "socks", "tempfile", + "thiserror", "tokio 0.2.25", "tor-stream", ] [[package]] name = "grin_pool" -version = "4.4.2" +version = "5.3.2" dependencies = [ "blake2-rfc", "chrono", - "failure", - "failure_derive", "grin_chain", "grin_core", "grin_keychain", @@ -1679,14 +1735,15 @@ dependencies = [ "rand 0.6.5", "serde", "serde_derive", + "thiserror", ] [[package]] name = "grin_secp256k1zkp" -version = "0.7.13" -source = "git+https://github.com/mwcproject/rust-secp256k1-zkp?tag=0.7.13#efc5684c8f4f91af6ae09a4d0ff1698053eb86de" +version = "0.7.14" +source = "git+https://github.com/mwcproject/rust-secp256k1-zkp?tag=0.7.14#98726a4b3926ef24201f3c83e3e189e8db67f58b" dependencies = [ - "arrayvec 0.3.25", + "arrayvec 0.7.4", "cc", "libc", "rand 0.5.6", @@ -1697,15 +1754,14 @@ dependencies = [ [[package]] name = "grin_servers" -version = "4.4.2" +version = "5.3.2" dependencies = [ + "atomic_float", "chrono", "dirs 1.0.5", "ed25519-dalek", - "failure", - "failure_derive", "fs2", - "futures 0.3.21", + "futures 0.3.30", "grin_api", "grin_chain", "grin_core", @@ -1726,6 +1782,7 @@ dependencies = [ "serde_derive", "serde_json", "sysinfo", + "thiserror", "timer", "tokio 0.2.25", "tokio-util 0.2.0", @@ -1734,14 +1791,12 @@ dependencies = [ [[package]] name = "grin_store" -version = "4.4.2" +version = "5.3.2" dependencies = [ "byteorder", "chrono", "croaring", "env_logger", - "failure", - "failure_derive", "filetime", "grin_core", "grin_util", @@ -1753,19 +1808,18 @@ dependencies = [ "serde", "serde_derive", "tempfile", + "thiserror", ] [[package]] name = "grin_util" -version = "4.4.2" +version = "5.3.2" dependencies = [ "backtrace", "base64 0.12.3", "byteorder", "data-encoding", "ed25519-dalek", - "failure", - "failure_derive", "grin_secp256k1zkp", "lazy_static", "log", @@ -1775,6 +1829,7 @@ dependencies = [ "serde", "serde_derive", "sha3", + "thiserror", "walkdir", "zeroize", "zip", @@ -1792,7 +1847,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap", + "indexmap 1.9.3", "slab", "tokio 0.2.25", "tokio-util 0.3.1", @@ -1802,9 +1857,15 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "heck" @@ -1824,6 +1885,18 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + [[package]] name = "hex_fmt" version = "0.3.0" @@ -1861,15 +1934,24 @@ dependencies = [ "hmac 0.7.1", ] +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] + [[package]] name = "http" -version = "0.2.4" +version = "0.2.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ - "bytes 1.0.1", + "bytes 1.6.0", "fnv", - "itoa", + "itoa 1.0.11", ] [[package]] @@ -1884,9 +1966,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.4.1" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" @@ -1924,8 +2006,8 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa", - "pin-project 1.0.8", + "itoa 0.4.8", + "pin-project 1.1.5", "socket2 0.3.19", "tokio 0.2.25", "tower-service", @@ -1973,12 +2055,35 @@ dependencies = [ name = "hyper-timeout" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d1f9b0b8258e3ef8f45928021d3ef14096c2b93b99e4b8cfcabf1f58ec84b0a" +checksum = "0d1f9b0b8258e3ef8f45928021d3ef14096c2b93b99e4b8cfcabf1f58ec84b0a" +dependencies = [ + "bytes 0.5.6", + "hyper", + "tokio 0.2.25", + "tokio-io-timeout", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +dependencies = [ + "android_system_properties", + "core-foundation-sys 0.8.6", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "bytes 0.5.6", - "hyper", - "tokio 0.2.25", - "tokio-io-timeout", + "cc", ] [[package]] @@ -1989,20 +2094,19 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.2.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ - "matches", "unicode-bidi", "unicode-normalization", ] [[package]] name = "if-addrs" -version = "0.6.5" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28538916eb3f3976311f5dfbe67b5362d0add1293d0a9cad17debf86f8e3aa48" +checksum = "2273e421f7c4f0fc99e1934fe4776f59d8df2972f4199d703fc0da9f2a9f73de" dependencies = [ "if-addrs-sys", "libc", @@ -2025,9 +2129,9 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97b8538953a3f0d0d3868f0a706eb4273535e10d72acb5c82c1c23ae48835c85" dependencies = [ - "async-io", - "futures 0.3.21", - "futures-lite", + "async-io 1.13.0", + "futures 0.3.30", + "futures-lite 1.13.0", "if-addrs", "ipnet", "libc", @@ -2037,23 +2141,53 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.7.0" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg 1.3.0", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +dependencies = [ + "equivalent", + "hashbrown 0.14.5", +] + +[[package]] +name = "inout" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ - "autocfg 1.0.1", - "hashbrown", + "generic-array 0.14.7", ] [[package]] name = "instant" -version = "0.1.10" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "io-lifetimes" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "iovec" version = "0.1.4" @@ -2065,9 +2199,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.3.1" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "itertools" @@ -2080,24 +2214,30 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" + +[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.22" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.52" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce791b7ca6638aae45be056e068fc756d871eb3b3b10b8efa62d1c9cec616752" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -2117,9 +2257,12 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.0" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] [[package]] name = "kernel32-sys" @@ -2142,21 +2285,21 @@ dependencies = [ [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.98" +version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libgit2-sys" -version = "0.12.21+1.1.0" +version = "0.12.26+1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86271bacd72b2b9e854c3dcfb82efd538f15f870e4c11af66900effb462f6825" +checksum = "19e1c899248e606fbfe68dcb31d8b0176ebab833b103824af31bddf4b7457494" dependencies = [ "cc", "libc", @@ -2177,11 +2320,11 @@ dependencies = [ [[package]] name = "libp2p" version = "0.35.1" -source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#fac8313fc18aab84ff5cbc89ed26102f5d28a0d2" +source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#c379113484a616345cff7043733c227bb71d9e74" dependencies = [ "atomic", - "bytes 1.0.1", - "futures 0.3.21", + "bytes 1.6.0", + "futures 0.3.30", "lazy_static", "libp2p-core", "libp2p-dns", @@ -2194,8 +2337,8 @@ dependencies = [ "libp2p-tcp", "libp2p-yamux", "parity-multiaddr", - "parking_lot 0.11.1", - "pin-project 1.0.8", + "parking_lot 0.11.2", + "pin-project 1.1.5", "smallvec", "wasm-timer", ] @@ -2203,7 +2346,7 @@ dependencies = [ [[package]] name = "libp2p-core" version = "0.27.1" -source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#fac8313fc18aab84ff5cbc89ed26102f5d28a0d2" +source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#c379113484a616345cff7043733c227bb71d9e74" dependencies = [ "asn1_der", "bs58", @@ -2211,7 +2354,7 @@ dependencies = [ "ed25519-dalek", "either", "fnv", - "futures 0.3.21", + "futures 0.3.30", "futures-timer", "lazy_static", "libsecp256k1", @@ -2219,18 +2362,18 @@ dependencies = [ "multihash", "multistream-select", "parity-multiaddr", - "parking_lot 0.11.1", - "pin-project 1.0.8", + "parking_lot 0.11.2", + "pin-project 1.1.5", "prost", "prost-build", "rand 0.7.3", - "ring", + "ring 0.16.20", "rw-stream-sink", - "sha2 0.9.5", + "sha2 0.9.9", "sha3", "smallvec", "thiserror", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.2", "void", "zeroize", ] @@ -2238,9 +2381,9 @@ dependencies = [ [[package]] name = "libp2p-dns" version = "0.27.0" -source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#fac8313fc18aab84ff5cbc89ed26102f5d28a0d2" +source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#c379113484a616345cff7043733c227bb71d9e74" dependencies = [ - "futures 0.3.21", + "futures 0.3.30", "libp2p-core", "log", ] @@ -2248,14 +2391,14 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" version = "0.28.0" -source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#fac8313fc18aab84ff5cbc89ed26102f5d28a0d2" +source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#c379113484a616345cff7043733c227bb71d9e74" dependencies = [ "asynchronous-codec", - "base64 0.13.0", + "base64 0.13.1", "byteorder", - "bytes 1.0.1", + "bytes 1.6.0", "fnv", - "futures 0.3.21", + "futures 0.3.30", "hex_fmt", "libp2p-core", "libp2p-swarm", @@ -2264,44 +2407,44 @@ dependencies = [ "prost-build", "rand 0.7.3", "regex", - "sha2 0.9.5", + "sha2 0.9.9", "smallvec", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.2", "wasm-timer", ] [[package]] name = "libp2p-mplex" version = "0.27.1" -source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#fac8313fc18aab84ff5cbc89ed26102f5d28a0d2" +source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#c379113484a616345cff7043733c227bb71d9e74" dependencies = [ "asynchronous-codec", - "bytes 1.0.1", - 
"futures 0.3.21", + "bytes 1.6.0", + "futures 0.3.30", "libp2p-core", "log", "nohash-hasher", - "parking_lot 0.11.1", + "parking_lot 0.11.2", "rand 0.7.3", "smallvec", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.2", ] [[package]] name = "libp2p-noise" version = "0.29.0" -source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#fac8313fc18aab84ff5cbc89ed26102f5d28a0d2" +source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#c379113484a616345cff7043733c227bb71d9e74" dependencies = [ - "bytes 1.0.1", - "curve25519-dalek", - "futures 0.3.21", + "bytes 1.6.0", + "curve25519-dalek 3.2.0", + "futures 0.3.30", "lazy_static", "libp2p-core", "log", "prost", "prost-build", - "rand 0.7.3", - "sha2 0.9.5", + "rand 0.8.5", + "sha2 0.9.9", "snow", "static_assertions", "x25519-dalek", @@ -2311,9 +2454,9 @@ dependencies = [ [[package]] name = "libp2p-ping" version = "0.27.0" -source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#fac8313fc18aab84ff5cbc89ed26102f5d28a0d2" +source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#c379113484a616345cff7043733c227bb71d9e74" dependencies = [ - "futures 0.3.21", + "futures 0.3.30", "libp2p-core", "libp2p-swarm", "log", @@ -2325,10 +2468,10 @@ dependencies = [ [[package]] name = "libp2p-swarm" version = "0.27.2" -source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#fac8313fc18aab84ff5cbc89ed26102f5d28a0d2" +source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#c379113484a616345cff7043733c227bb71d9e74" dependencies = [ "either", - "futures 0.3.21", + "futures 0.3.30", "libp2p-core", "log", "rand 0.7.3", @@ -2340,19 +2483,19 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" version = "0.22.0" -source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#fac8313fc18aab84ff5cbc89ed26102f5d28a0d2" +source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#c379113484a616345cff7043733c227bb71d9e74" dependencies = [ - "quote 1.0.9", - "syn 1.0.74", + "quote 1.0.36", + "syn 1.0.109", ] [[package]] name = "libp2p-tcp" version = "0.27.1" -source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#fac8313fc18aab84ff5cbc89ed26102f5d28a0d2" +source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#c379113484a616345cff7043733c227bb71d9e74" dependencies = [ - "async-io", - "futures 0.3.21", + "async-io 1.13.0", + "futures 0.3.30", "futures-timer", "if-addrs", "if-watch", @@ -2361,7 +2504,7 @@ dependencies = [ "libp2p-core", "log", "socket2 0.3.19", - "tokio 1.9.0", + "tokio 1.38.0", ] [[package]] @@ -2370,7 +2513,7 @@ version = "0.7.1" source = "git+https://github.com/mwcproject/rust-libp2p-tokio-socks5?branch=master#69bd822cc0bd1021244e4eb4c8fd59df3e80214a" dependencies = [ "data-encoding", - "futures 0.3.21", + "futures 0.3.30", "futures-timer", "if-addrs", "ipnet", @@ -2384,15 +2527,25 @@ dependencies = [ [[package]] name = "libp2p-yamux" version = "0.30.1" -source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#fac8313fc18aab84ff5cbc89ed26102f5d28a0d2" +source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#c379113484a616345cff7043733c227bb71d9e74" dependencies = [ - "futures 0.3.21", + "futures 0.3.30", "libp2p-core", - "parking_lot 0.11.1", + "parking_lot 0.11.2", "thiserror", "yamux", ] +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = 
[ + "bitflags 2.6.0", + "libc", +] + [[package]] name = "libsecp256k1" version = "0.3.5" @@ -2411,9 +2564,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.3" +version = "1.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de5435b8549c16d423ed0c03dbaafe57cf6c3344744f1242520d59c9d8ecec66" +checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" dependencies = [ "cc", "libc", @@ -2423,9 +2576,21 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + +[[package]] +name = "linux-raw-sys" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + +[[package]] +name = "linux-raw-sys" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "lmdb-zero" @@ -2450,20 +2615,20 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.4" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ + "autocfg 1.3.0", "scopeguard", ] [[package]] name = "log" -version = "0.4.14" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" dependencies = [ - "cfg-if 1.0.0", "serde", "value-bag", ] @@ -2514,23 +2679,11 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" -[[package]] -name = "matches" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" - -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "memchr" -version = "2.4.0" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memmap" @@ -2542,23 +2695,13 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "memoffset" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" -dependencies = [ - "autocfg 1.0.1", -] - [[package]] name = "miniz_oxide" -version = "0.4.4" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" dependencies = [ "adler", - "autocfg 1.0.1", ] [[package]] @@ -2582,15 +2725,13 @@ dependencies = [ 
[[package]] name = "mio" -version = "0.7.13" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", - "log", - "miow 0.3.7", - "ntapi", - "winapi 0.3.9", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.48.0", ] [[package]] @@ -2644,9 +2785,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dac63698b887d2d929306ea48b63760431ff8a24fac40ddb22f9c7f49fb7cab" dependencies = [ "digest 0.9.0", - "generic-array 0.14.4", + "generic-array 0.14.7", "multihash-derive", - "sha2 0.9.5", + "sha2 0.9.9", "unsigned-varint 0.5.1", ] @@ -2658,9 +2799,9 @@ checksum = "424f6e86263cd5294cbd7f1e95746b95aca0e0d66bff31e5a40d6baa87b4aa99" dependencies = [ "proc-macro-crate", "proc-macro-error", - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", "synstructure", ] @@ -2673,14 +2814,43 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "multistream-select" version = "0.10.1" -source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#fac8313fc18aab84ff5cbc89ed26102f5d28a0d2" +source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#c379113484a616345cff7043733c227bb71d9e74" dependencies = [ - "bytes 1.0.1", - "futures 0.3.21", + "bytes 1.6.0", + "futures 0.3.30", "log", - "pin-project 1.0.8", + "pin-project 1.1.5", "smallvec", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.2", +] + +[[package]] +name = "mwc" +version = "5.3.2" +dependencies = [ + "blake2-rfc", + "built", + "chrono", + "clap", + "ctrlc", + "cursive", + "cursive_table_view", + "futures 0.3.30", + "grin_api", + "grin_chain", + "grin_config", + "grin_core", + "grin_keychain", + "grin_p2p", + "grin_servers", + "grin_store", + "grin_util", + "humansize", + "log", + "serde", + "serde_json", + "term", + "thiserror", ] [[package]] @@ -2696,9 +2866,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.37" +version = "0.2.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" +checksum = "b13b648036a2339d06de780866fbdfda0dde886de7b3af2ddeba8b14f4ee34ac" dependencies = [ "cfg-if 0.1.10", "libc", @@ -2707,13 +2877,13 @@ dependencies = [ [[package]] name = "nix" -version = "0.20.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa9b4819da1bc61c0ea48b63b7bc8604064dd43013e7cc325df098d49cd7c18a" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ - "bitflags 1.2.1", - "cc", + "bitflags 2.6.0", "cfg-if 1.0.0", + "cfg_aliases", "libc", ] @@ -2729,15 +2899,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" -[[package]] -name = "ntapi" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "num" version = "0.2.1" @@ -2749,20 +2910,20 @@ dependencies = [ "num-integer", "num-iter", "num-rational 0.2.4", - "num-traits 0.2.14", + "num-traits 0.2.19", ] [[package]] name = "num" -version = "0.3.1" +version = "0.4.3" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7a8e9be5e039e2ff869df49155f1c06bd01ade2117ec783e56ab0932b67a8f" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" dependencies = [ - "num-complex 0.3.1", + "num-complex 0.4.6", "num-integer", "num-iter", - "num-rational 0.3.2", - "num-traits 0.2.14", + "num-rational 0.4.2", + "num-traits 0.2.19", ] [[package]] @@ -2771,9 +2932,9 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ - "autocfg 1.0.1", + "autocfg 1.3.0", "num-integer", - "num-traits 0.2.14", + "num-traits 0.2.19", ] [[package]] @@ -2782,38 +2943,43 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" dependencies = [ - "autocfg 1.0.1", - "num-traits 0.2.14", + "autocfg 1.3.0", + "num-traits 0.2.19", ] [[package]] name = "num-complex" -version = "0.3.1" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747d632c0c558b87dbabbe6a82f3b4ae03720d0646ac5b7b4dae89394be5f2c5" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" dependencies = [ - "num-traits 0.2.14", + "num-traits 0.2.19", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg 1.0.1", - "num-traits 0.2.14", + "num-traits 0.2.19", ] [[package]] name = "num-iter" -version = "0.1.42" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ - "autocfg 1.0.1", + "autocfg 1.3.0", "num-integer", - "num-traits 0.2.14", + "num-traits 0.2.19", ] [[package]] @@ -2822,21 +2988,20 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ - "autocfg 1.0.1", + "autocfg 1.3.0", "num-bigint", "num-integer", - "num-traits 0.2.14", + "num-traits 0.2.19", ] [[package]] name = "num-rational" -version = "0.3.2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" dependencies = [ - "autocfg 1.0.1", "num-integer", - "num-traits 0.2.14", + "num-traits 0.2.19", ] [[package]] @@ -2845,48 +3010,51 @@ version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" dependencies = [ - "num-traits 0.2.14", + "num-traits 0.2.19", ] [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ - "autocfg 1.0.1", + "autocfg 1.3.0", ] [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", ] [[package]] -name = "object" -version = "0.26.0" +name = "num_threads" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c55827317fb4c08822499848a14237d2874d6f139828893017237e7ab93eb386" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" dependencies = [ - "memchr", + "libc", ] [[package]] -name = "odds" -version = "0.2.26" +name = "object" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eae0151b9dacf24fcc170d9995e511669a082856a91f958a2fe380bfab3fb22" +checksum = "081b846d1d56ddfc18fdf1a922e4f6e07a11768ea1b92dec44e42b72712ccfce" +dependencies = [ + "memchr", +] [[package]] name = "once_cell" -version = "1.8.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "opaque-debug" @@ -2896,15 +3064,15 @@ checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" [[package]] name = "opaque-debug" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl-probe" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "ordered-float" @@ -2912,14 +3080,14 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" dependencies = [ - "num-traits 0.2.14", + "num-traits 0.2.19", ] [[package]] name = "output_vt100" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9" +checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66" dependencies = [ "winapi 0.3.9", ] @@ -2935,9 +3103,9 @@ dependencies = [ [[package]] name = "pancurses" -version = "0.16.1" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3058bc37c433096b2ac7afef1c5cdfae49ede0a4ffec3dfc1df1df0959d0ff0" +checksum = "0352975c36cbacb9ee99bfb709b9db818bed43af57751797f8633649759d13db" dependencies = [ "libc", "log", @@ -2949,7 +3117,7 @@ dependencies = [ [[package]] name = "parity-multiaddr" version = "0.11.1" -source = "git+https://github.com/mwcproject/rust-libp2p?branch=master#fac8313fc18aab84ff5cbc89ed26102f5d28a0d2" +source = 
"git+https://github.com/mwcproject/rust-libp2p?branch=master#c379113484a616345cff7043733c227bb71d9e74" dependencies = [ "arrayref", "bs58", @@ -2959,15 +3127,15 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint 0.7.0", + "unsigned-varint 0.7.2", "url", ] [[package]] name = "parking" -version = "2.0.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" @@ -2976,25 +3144,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" dependencies = [ "lock_api 0.3.4", - "parking_lot_core 0.7.2", + "parking_lot_core 0.7.3", ] [[package]] name = "parking_lot" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", - "lock_api 0.4.4", - "parking_lot_core 0.8.3", + "lock_api 0.4.12", + "parking_lot_core 0.8.6", ] [[package]] name = "parking_lot_core" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" +checksum = "b93f386bb233083c799e6e642a9d73db98c24a5deeb95ffc85bf281255dffc98" dependencies = [ "cfg-if 0.1.10", "cloudabi", @@ -3006,26 +3174,26 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall 0.2.9", + "redox_syscall 0.2.16", "smallvec", "winapi 0.3.9", ] [[package]] name = "password-hash" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd482dfb8cfba5a93ec0f91e1c0f66967cb2fdc1a8dba646c4f9202c5d05d785" +checksum = "77e0b28ace46c5a396546bcf443bf422b57049617433d8854227352a4a9b24e7" dependencies = [ "base64ct", - "rand_core 0.6.3", + "rand_core 0.6.4", "subtle 2.4.1", ] @@ -3039,7 +3207,7 @@ dependencies = [ "crypto-mac 0.11.1", "hmac 0.11.0", "password-hash", - "sha2 0.9.5", + "sha2 0.9.9", ] [[package]] @@ -3054,9 +3222,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "petgraph" @@ -3065,47 +3233,47 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" dependencies = [ "fixedbitset", - "indexmap", + "indexmap 1.9.3", ] [[package]] name = "pin-project" -version = "0.4.28" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "918192b5c59119d51e0cd221f4d49dde9112824ba717369e903c97d076083d0f" +checksum = "3ef0f924a5ee7ea9cbcea77529dba45f8a9ba9f622419fe3386ca581a3ae9d5a" dependencies = 
[ - "pin-project-internal 0.4.28", + "pin-project-internal 0.4.30", ] [[package]] name = "pin-project" -version = "1.0.8" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ - "pin-project-internal 1.0.8", + "pin-project-internal 1.1.5", ] [[package]] name = "pin-project-internal" -version = "0.4.28" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be26700300be6d9d23264c73211d8190e755b6b5ca7a1b28230025511b52a5e" +checksum = "851c8d0ce9bebe43790dedfc86614c23494ac9f423dd618d3a61fc693eafe61e" dependencies = [ - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", ] [[package]] name = "pin-project-internal" -version = "1.0.8" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.68", ] [[package]] @@ -3116,9 +3284,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.7" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -3126,51 +3294,88 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1d5c74c9876f070d3e8fd503d748c7d974c3e48da8f41350fa5222ef9b4391" +dependencies = [ + "atomic-waker", + "fastrand 2.1.0", + "futures-io", +] + [[package]] name = "pkg-config" -version = "0.3.19" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "polling" -version = "2.1.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92341d779fa34ea8437ef4d82d440d5e1ce3f3ff7f824aa64424cd481f9a1f25" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ + "autocfg 1.3.0", + "bitflags 1.3.2", "cfg-if 1.0.0", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "winapi 0.3.9", + "pin-project-lite 0.2.14", + "windows-sys 0.48.0", +] + +[[package]] +name = "polling" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3ed00ed3fbf728b5816498ecd316d1716eecaced9c0c8d2c5a6740ca214985b" +dependencies = [ + "cfg-if 1.0.0", + "concurrent-queue", + "hermit-abi 0.4.0", + "pin-project-lite 0.2.14", + "rustix 0.38.34", + "tracing", + "windows-sys 0.52.0", ] [[package]] name = "poly1305" -version = "0.6.2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4b7456bc1ad2d4cf82b3a016be4c2ac48daf11bf990c1603ebd447fe6f30fca8" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ - "cpuid-bool", + "cpufeatures", + "opaque-debug 0.3.1", "universal-hash", ] [[package]] name = "polyval" -version = "0.4.5" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ - "cpuid-bool", - "opaque-debug 0.3.0", + "cfg-if 1.0.0", + "cpufeatures", + "opaque-debug 0.3.1", "universal-hash", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" -version = "0.2.10" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "pretty_assertions" @@ -3178,7 +3383,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f81e1644e1b54f5a68959a29aa86cde704219254669da328ecfdf6a1f09d427" dependencies = [ - "ansi_term", + "ansi_term 0.11.0", "ctor", "difference", "output_vt100", @@ -3186,12 +3391,12 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.0.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fdbd1df62156fbc5945f4762632564d7d038153091c3fcf1067f6aef7cff92" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ - "thiserror", - "toml", + "once_cell", + "toml_edit", ] [[package]] @@ -3201,9 +3406,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", "version_check", ] @@ -3213,17 +3418,11 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.28", - "quote 1.0.9", + "proc-macro2 1.0.86", + "quote 1.0.36", "version_check", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - [[package]] name = "proc-macro2" version = "0.4.30" @@ -3235,11 +3434,11 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.28" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c7ed8b8c7b886ea3ed7dde405212185f423ab44682667c8c6dd14aa1d9f6612" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ - "unicode-xid 0.2.2", + "unicode-ident", ] [[package]] @@ -3248,7 +3447,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" dependencies = [ - "bytes 1.0.1", + "bytes 1.6.0", "prost-derive", ] @@ -3258,7 +3457,7 @@ version = "0.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "32d3ebd75ac2679c2af3a92246639f9fcc8a442ee420719cc4fe195b98dd5fa3" dependencies = [ - "bytes 1.0.1", + "bytes 1.6.0", "heck", "itertools", "log", @@ -3278,9 +3477,9 @@ checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", ] [[package]] @@ -3289,7 +3488,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" dependencies = [ - "bytes 1.0.1", + "bytes 1.6.0", "prost", ] @@ -3310,11 +3509,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.9" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ - "proc-macro2 1.0.28", + "proc-macro2 1.0.86", ] [[package]] @@ -3336,7 +3535,7 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" dependencies = [ - "autocfg 0.1.7", + "autocfg 0.1.8", "libc", "rand_chacha 0.1.1", "rand_core 0.4.2", @@ -3364,14 +3563,13 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", - "rand_core 0.6.3", - "rand_hc 0.3.1", + "rand_core 0.6.4", ] [[package]] @@ -3380,7 +3578,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" dependencies = [ - "autocfg 0.1.7", + "autocfg 0.1.8", "rand_core 0.3.1", ] @@ -3401,7 +3599,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -3430,11 +3628,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.15", ] [[package]] @@ -3455,15 +3653,6 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core 0.6.3", -] - [[package]] name = "rand_isaac" version = "0.1.1" @@ -3504,7 +3693,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" dependencies = [ - "autocfg 0.1.7", + "autocfg 0.1.8", "rand_core 0.4.2", ] @@ -3519,27 +3708,22 @@ dependencies = [ [[package]] name = "rayon" -version = "1.5.1" +version = "1.10.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ - "autocfg 1.0.1", - "crossbeam-deque", "either", "rayon-core", ] [[package]] name = "rayon-core" -version = "1.9.1" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ - "crossbeam-channel 0.5.1", "crossbeam-deque", - "crossbeam-utils 0.8.5", - "lazy_static", - "num_cpus", + "crossbeam-utils", ] [[package]] @@ -3559,11 +3743,20 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_syscall" -version = "0.2.9" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", ] [[package]] @@ -3579,39 +3772,43 @@ dependencies = [ [[package]] name = "redox_users" -version = "0.4.0" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ - "getrandom 0.2.3", - "redox_syscall 0.2.9", + "getrandom 0.2.15", + "libredox", + "thiserror", ] [[package]] name = "regex" -version = "1.5.4" +version = "1.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" dependencies = [ "aho-corasick", "memchr", + "regex-automata", "regex-syntax", ] [[package]] -name = "regex-syntax" -version = "0.6.25" +name = "regex-automata" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] [[package]] -name = "remove_dir_all" -version = "0.5.3" +name = "regex-syntax" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi 0.3.9", -] +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = "ring" @@ -3622,12 +3819,27 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", - "untrusted", + "spin 0.5.2", + "untrusted 0.7.1", "web-sys", "winapi 0.3.9", ] +[[package]] +name = "ring" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +dependencies = [ + "cc", + "cfg-if 1.0.0", + "getrandom 0.2.15", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.52.0", +] + 
[[package]] name = "ripemd160" version = "0.9.1" @@ -3636,7 +3848,7 @@ checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" dependencies = [ "block-buffer 0.9.0", "digest 0.9.0", - "opaque-debug 0.3.0", + "opaque-debug 0.3.1", ] [[package]] @@ -3645,25 +3857,52 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "blake2b_simd", "constant_time_eq", - "crossbeam-utils 0.8.5", + "crossbeam-utils", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.23", ] [[package]] -name = "rustc-demangle" -version = "0.1.20" +name = "rustix" +version = "0.37.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dead70b0b5e03e9c814bcb6b01e03e68f7c57a80aa48c72ec92152ab3e818d49" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] [[package]] -name = "rustc_version" -version = "0.2.3" +name = "rustix" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "semver", + "bitflags 2.6.0", + "errno", + "libc", + "linux-raw-sys 0.4.14", + "windows-sys 0.52.0", ] [[package]] @@ -3674,7 +3913,7 @@ checksum = "c0d4a31f5d68413404705d6982529b0e11a9aacd4839d1d6222ee3b8cb4015e1" dependencies = [ "base64 0.11.0", "log", - "ring", + "ring 0.16.20", "sct", "webpki", ] @@ -3687,7 +3926,7 @@ checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" dependencies = [ "base64 0.12.3", "log", - "ring", + "ring 0.16.20", "sct", "webpki", ] @@ -3710,16 +3949,16 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ - "futures 0.3.21", - "pin-project 0.4.28", + "futures 0.3.30", + "pin-project 0.4.30", "static_assertions", ] [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "same-file" @@ -3732,19 +3971,18 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "lazy_static", - "winapi 0.3.9", + "windows-sys 0.52.0", ] [[package]] name = "scopeguard" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" @@ -3752,8 +3990,8 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -3762,9 +4000,9 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64808902d7d99f78eaddd2b4e2509713babc3dc3c85ad6f4c447680f3c01e535" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", "core-foundation", - "core-foundation-sys", + "core-foundation-sys 0.7.0", "libc", "security-framework-sys", ] @@ -3775,7 +4013,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17bf11d99252f512695eb468de5516e5cf75455521e69dfe343f3b74e4748405" dependencies = [ - "core-foundation-sys", + "core-foundation-sys 0.7.0", "libc", ] @@ -3789,6 +4027,12 @@ dependencies = [ "serde", ] +[[package]] +name = "semver" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" + [[package]] name = "semver-parser" version = "0.7.0" @@ -3797,9 +4041,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.127" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f03b9878abf6d14e6779d3f24f07b2cfa90352cfec4acc5aab8f1ac7f146fae8" +checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" dependencies = [ "serde_derive", ] @@ -3816,34 +4060,34 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.127" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a024926d3432516606328597e0f224a51355a493b49fdd67e9209187cbe55ecc" +checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" dependencies = [ - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.68", ] [[package]] name = "serde_json" -version = "1.0.66" +version = "1.0.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "336b10da19a12ad094b59d870ebde26a45402e5b470add4b5fd03c5048a32127" +checksum = "d947f6b3163d8857ea16c4fa0dd4840d52f3041039a85decd46867eb1abef2e4" dependencies = [ - "itoa", + "itoa 1.0.11", "ryu", "serde", ] [[package]] name = "serde_yaml" -version = "0.8.17" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15654ed4ab61726bf918a39cb8d98a2e2995b002387807fa6ba58fdf7f59bb23" +checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" dependencies = [ - "dtoa", - "linked-hash-map", + "indexmap 1.9.3", + "ryu", "serde", "yaml-rust 0.4.5", ] @@ -3862,15 +4106,26 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.5" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362ae5752fd2137731f9fa25fd4d9058af34666ca1966fb969119cc35719f12" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", "cfg-if 1.0.0", "cpufeatures", "digest 0.9.0", - "opaque-debug 0.3.0", + "opaque-debug 0.3.1", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.7", ] [[package]] @@ -3888,9 +4143,9 @@ dependencies = [ [[package]] name = "signal-hook" -version = "0.1.17" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e31d442c16f047a671b5a71e2161d6e68814012b7f5379d269ebd915fac2729" +checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" dependencies = [ "libc", "signal-hook-registry", @@ -3898,53 +4153,55 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] [[package]] name = "signature" -version = "1.3.1" +version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c19772be3c4dd2ceaacf03cb41d5885f2a02c4d8804884918e3a258480803335" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" [[package]] name = "siphasher" -version = "0.3.6" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "729a25c17d72b06c68cb47955d44fda88ad2d3e7d77e025663fdd69b93dd71a1" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" [[package]] name = "slab" -version = "0.4.3" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f173ac3d1a7e3b28003f40de0b5ce7fe2710f9b9dc3fc38664cebee46b3b6527" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg 1.3.0", +] [[package]] name = "smallvec" -version = "1.6.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "snow" -version = "0.7.2" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "795dd7aeeee24468e5a32661f6d27f7b5cbed802031b2d7640c7b10f8fb2dd50" +checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" dependencies = [ "aes-gcm", "blake2", "chacha20poly1305", - "rand 0.7.3", - "rand_core 0.5.1", - "ring", + "curve25519-dalek 4.1.3", + "rand_core 0.6.4", + "ring 0.17.8", "rustc_version", - "sha2 0.9.5", + "sha2 0.10.8", "subtle 2.4.1", - "x25519-dalek", ] [[package]] @@ -3960,24 +4217,33 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.1" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi 0.3.9", ] +[[package]] +name = "socket2" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "socks" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"30f86c7635fadf2814201a4f67efefb0007588ae7422ce299f354ab5c97f61ae" +checksum = "f0c3dbbd9ae980613c6dd8e28a9407b50509d3803b57624d5dfe8315218cd58b" dependencies = [ "byteorder", "libc", - "winapi 0.2.8", - "ws2_32-sys", + "winapi 0.3.9", ] [[package]] @@ -3986,6 +4252,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -3998,28 +4270,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "stream-cipher" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c80e15f898d8d8f25db24c253ea615cc14acf418ff307822995814e7d42cfa89" -dependencies = [ - "block-cipher", - "generic-array 0.14.4", -] - [[package]] name = "strsim" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - [[package]] name = "subtle" version = "1.0.0" @@ -4051,25 +4307,36 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.74" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1873d832550d4588c3dbc20f01361ab00bfe741048f71e3fecf145a7cc18b29c" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.28", - "quote 1.0.9", - "unicode-xid 0.2.2", + "proc-macro2 1.0.86", + "quote 1.0.36", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "901fa70d88b9d6c98022e23b4136f9f3e54e4662c3bc1bd1d84a42a9a0f0c1e9" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.36", + "unicode-ident", ] [[package]] name = "synstructure" -version = "0.12.5" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "474aaa926faa1603c40b7885a9eaea29b444d1cb2850cb7c0e37bb1a4182f4fa" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", - "unicode-xid 0.2.2", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", + "unicode-xid 0.2.4", ] [[package]] @@ -4087,16 +4354,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.2.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if 1.0.0", - "libc", - "rand 0.8.4", - "redox_syscall 0.2.9", - "remove_dir_all", - "winapi 0.3.9", + "fastrand 2.1.0", + "rustix 0.38.34", + "windows-sys 0.52.0", ] [[package]] @@ -4121,9 +4386,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] @@ -4139,22 +4404,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.26" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2" +checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.26" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.68", ] [[package]] @@ -4170,46 +4435,60 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ + "deranged", + "itoa 1.0.11", "libc", - "winapi 0.3.9", + "num-conv", + "num_threads", + "powerfmt", + "serde", + "time-core", + "time-macros", ] [[package]] -name = "timer" -version = "0.2.0" +name = "time-core" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31d42176308937165701f50638db1c31586f183f1aab416268216577aec7306b" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" + +[[package]] +name = "time-macros" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ - "chrono", + "num-conv", + "time-core", ] [[package]] -name = "tiny-keccak" -version = "2.0.2" +name = "timer" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +checksum = "31d42176308937165701f50638db1c31586f183f1aab416268216577aec7306b" dependencies = [ - "crunchy", + "chrono", ] [[package]] name = "tinyvec" -version = "1.3.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "848a1e1181b9f6753b5e96a092749e29b11d19ede67dfbbd6c7dc7e0f49b5338" +checksum = "c55115c6fbe2d2bef26eb09ad74bde02d8255476fc0c7b515ef09fbb35742d82" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" @@ -4237,15 +4516,16 @@ dependencies = [ [[package]] name = "tokio" -version = "1.9.0" +version = "1.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7b349f11a7047e6d1276853e612d152f5e8a352c61917887cc2169e2366b4c" +checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" dependencies = [ - "autocfg 1.0.1", + "backtrace", "libc", - "mio 0.7.13", - "pin-project-lite 0.2.7", - "winapi 0.3.9", + "mio 0.8.11", + "pin-project-lite 
0.2.14", + "socket2 0.5.7", + "windows-sys 0.48.0", ] [[package]] @@ -4264,9 +4544,9 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" dependencies = [ - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", ] [[package]] @@ -4301,7 +4581,7 @@ checksum = "1997788a0e25e09300e44680ba1ef9d44d6f634a883641f80109e8b59c928daf" dependencies = [ "bytes 0.4.12", "either", - "futures 0.3.21", + "futures 0.3.30", "thiserror", "tokio 0.2.25", ] @@ -4336,13 +4616,30 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.8" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] +[[package]] +name = "toml_datetime" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" + +[[package]] +name = "toml_edit" +version = "0.19.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +dependencies = [ + "indexmap 2.2.6", + "toml_datetime", + "winnow", +] + [[package]] name = "tor-stream" version = "0.2.0" @@ -4355,29 +4652,28 @@ dependencies = [ [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.26" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.14", "tracing-core", ] [[package]] name = "tracing-core" -version = "0.1.18" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9ff14f98b1a4b289c6248a023c1c2fa1491062964e9fed67ab29c4e4da4a052" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] @@ -4386,7 +4682,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 1.0.8", + "pin-project 1.1.5", "tracing", ] @@ -4398,9 +4694,9 @@ checksum = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typemap" @@ -4413,39 +4709,42 @@ dependencies = [ [[package]] name = "typenum" -version = "1.13.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" +checksum = 
"42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "unicode-bidi" -version = "0.3.5" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" -dependencies = [ - "matches", -] +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.8.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" +checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" [[package]] name = "unicode-width" -version = "0.1.8" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" +checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" [[package]] name = "unicode-xid" @@ -4455,17 +4754,17 @@ checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" [[package]] name = "unicode-xid" -version = "0.2.2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "universal-hash" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ - "generic-array 0.14.4", + "crypto-common", "subtle 2.4.1", ] @@ -4486,12 +4785,12 @@ checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" [[package]] name = "unsigned-varint" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f8d425fafb8cd76bc3f22aace4af471d3156301d7508f2107e98fbeae10bc7f" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" dependencies = [ "asynchronous-codec", - "bytes 1.0.1", + "bytes 1.6.0", ] [[package]] @@ -4500,27 +4799,28 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "url" -version = "2.2.2" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", - 
"matches", "percent-encoding", ] [[package]] name = "value-bag" -version = "1.0.0-alpha.7" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd320e1520f94261153e96f7534476ad869c14022aee1e59af7c778075d840ae" -dependencies = [ - "ctor", - "version_check", -] +checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" [[package]] name = "vcpkg" @@ -4536,9 +4836,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "void" @@ -4548,28 +4848,26 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "waker-fn" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" +checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", - "winapi 0.3.9", "winapi-util", ] [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -4581,15 +4879,15 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.75" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b608ecc8f4198fe8680e2ed18eccab5f0cd4caaf3d83516fa5fb2e927fda2586" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -4597,24 +4895,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.75" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "580aa3a91a63d23aac5b6b267e2d13cb4f363e31dce6c352fca4752ae12e479f" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", - "lazy_static", "log", - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", + "once_cell", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.68", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.25" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16646b21c3add8e13fdb8f20172f8a28c3dbf62f45406bcff0233188226cfe0c" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" 
dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -4624,32 +4922,32 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.75" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "171ebf0ed9e1458810dfcb31f2e766ad6b3a89dbda42d8901f2b268277e5f09c" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ - "quote 1.0.9", + "quote 1.0.36", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.75" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c2657dd393f03aa2a659c25c6ae18a13a4048cebd220e147933ea837efc589f" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.68", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.75" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e0c4a743a309662d45f4ede961d7afa4ba4131a59a639f29b0069c3798bbcc2" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-timer" @@ -4657,9 +4955,9 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" dependencies = [ - "futures 0.3.21", + "futures 0.3.30", "js-sys", - "parking_lot 0.11.1", + "parking_lot 0.11.2", "pin-utils", "wasm-bindgen", "wasm-bindgen-futures", @@ -4668,9 +4966,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.52" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01c70a82d842c9979078c772d4a1344685045f1a5628f677c2b2eab4dd7d2696" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -4682,8 +4980,8 @@ version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -4695,24 +4993,16 @@ dependencies = [ "webpki", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "which" -version = "4.2.2" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea187a8ef279bc014ec368c27a920da2024d2a711109bfbe3440585d5cf27ad9" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ "either", - "lazy_static", - "libc", + "home", + "once_cell", + "rustix 0.38.34", ] [[package]] @@ -4745,11 +5035,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi 0.3.9", + "windows-sys 0.52.0", ] [[package]] @@ -4758,6 +5048,163 @@ version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +dependencies = [ + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = 
"0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" + +[[package]] +name = "winnow" +version = "0.5.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.5.1" @@ -4783,16 +5230,16 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a0c105152107e3b96f6a00a65e86ce82d9b125230e1c4302940eca58ff71f4f" dependencies = [ - "curve25519-dalek", + "curve25519-dalek 3.2.0", "rand_core 0.5.1", "zeroize", ] [[package]] name = "xi-unicode" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e71b85d8b1b8bfaf4b5c834187554d201a8cd621c2bbfa33efd41a3ecabd48b2" +checksum = "a67300977d3dc3f8034dae89778f502b6ba20b269527b3223ba59c0cf393bb8a" [[package]] name = "yaml-rust" @@ -4815,33 +5262,52 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cc7bd8c983209ed5d527f44b01c41b7dc146fd960c61cf9e1d25399841dc271" dependencies = [ - "futures 0.3.21", + "futures 0.3.30", "log", "nohash-hasher", - "parking_lot 0.11.1", + "parking_lot 0.11.2", "rand 0.7.3", "static_assertions", ] +[[package]] +name = "zerocopy" +version = "0.7.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.68", +] + [[package]] name = "zeroize" -version = "1.4.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "377db0846015f7ae377174787dd452e1c5f5a9050bc6f954911d01f116daa0cd" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = 
"1.1.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.28", - "quote 1.0.9", - "syn 1.0.74", - "synstructure", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.68", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 40042e5e9f..46171774a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "mwc" -version = "4.4.2" +version = "5.3.2" authors = ["Grin Developers "] description = "Simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format." license = "Apache-2.0" @@ -24,27 +24,26 @@ blake2-rfc = "0.2" chrono = "0.4.11" clap = { version = "2.33", features = ["yaml"] } ctrlc = { version = "3.1", features = ["termination"] } -failure = "0.1" -failure_derive = "0.1" -futures = "0.3.19" +cursive_table_view = "0.14.0" humansize = "1.1.0" -log = "0.4" serde = "1" +futures = "0.3.19" serde_json = "1" +log = "0.4" term = "0.6" +thiserror = "1" - -grin_api = { path = "./api", version = "4.4.2" } -grin_config = { path = "./config", version = "4.4.2" } -grin_chain = { path = "./chain", version = "4.4.2" } -grin_core = { path = "./core", version = "4.4.2" } -grin_keychain = { path = "./keychain", version = "4.4.2" } -grin_p2p = { path = "./p2p", version = "4.4.2" } -grin_servers = { path = "./servers", version = "4.4.2" } -grin_util = { path = "./util", version = "4.4.2" } +grin_api = { path = "./api", version = "5.3.2" } +grin_config = { path = "./config", version = "5.3.2" } +grin_chain = { path = "./chain", version = "5.3.2" } +grin_core = { path = "./core", version = "5.3.2" } +grin_keychain = { path = "./keychain", version = "5.3.2" } +grin_p2p = { path = "./p2p", version = "5.3.2" } +grin_servers = { path = "./servers", version = "5.3.2" } +grin_util = { path = "./util", version = "5.3.2" } [dependencies.cursive] -version = "0.15" +version = "0.20" default-features = false features = ["pancurses-backend"] @@ -52,5 +51,9 @@ features = ["pancurses-backend"] built = { version = "0.4", features = ["git2"]} [dev-dependencies] -grin_chain = { path = "./chain", version = "4.4.2" } -grin_store = { path = "./store", version = "4.4.2" } +grin_chain = { path = "./chain", version = "5.3.2" } +grin_store = { path = "./store", version = "5.3.2" } + +[profile.release-with-debug] +inherits = "release" +debug = true \ No newline at end of file diff --git a/api/Cargo.toml b/api/Cargo.toml index 1b3c114570..28c10b80b6 100644 --- a/api/Cargo.toml +++ b/api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grin_api" -version = "4.4.2" +version = "5.3.2" authors = ["Grin Developers "] description = "APIs for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format." 
license = "Apache-2.0" @@ -11,8 +11,6 @@ edition = "2018" [dependencies] easy-jsonrpc-mw = "0.5.4" -failure = "0.1.1" -failure_derive = "0.1.1" hyper = "0.13" lazy_static = "1" regex = "1" @@ -20,6 +18,7 @@ ring = "0.16" serde = "1" serde_derive = "1" serde_json = "1" +thiserror = "1" log = "0.4" tokio = { version = "0.2", features = ["full"] } tokio-rustls = "0.13" @@ -31,12 +30,12 @@ url = "2.1" bytes = "0.5" chrono = { version = "0.4.11", features = ["serde"] } -grin_core = { path = "../core", version = "4.4.2" } -grin_chain = { path = "../chain", version = "4.4.2" } -grin_p2p = { path = "../p2p", version = "4.4.2" } -grin_pool = { path = "../pool", version = "4.4.2" } -grin_store = { path = "../store", version = "4.4.2" } -grin_util = { path = "../util", version = "4.4.2" } +grin_core = { path = "../core", version = "5.3.2" } +grin_chain = { path = "../chain", version = "5.3.2" } +grin_p2p = { path = "../p2p", version = "5.3.2" } +grin_pool = { path = "../pool", version = "5.3.2" } +grin_store = { path = "../store", version = "5.3.2" } +grin_util = { path = "../util", version = "5.3.2" } # NOTE. We can't have hyper-rustls the same version for Android and non android. because if how rust builds dependency. # Android must have v0.20+ diff --git a/api/src/auth.rs b/api/src/auth.rs index 7db3a6b6d4..2a1544e21e 100644 --- a/api/src/auth.rs +++ b/api/src/auth.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/api/src/client.rs b/api/src/client.rs index 4a1254ce81..f08e80d2ca 100644 --- a/api/src/client.rs +++ b/api/src/client.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,17 +15,14 @@ //! High level JSON/HTTP client API use crate::core::global; -use crate::rest::{Error, ErrorKind}; +use crate::rest::Error; use crate::util::to_base64; -use failure::Fail; -use http::uri::{InvalidUri, Uri}; +use http::uri::Uri; use hyper::body; use hyper::header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE, USER_AGENT}; use hyper::{Body, Client, Request}; -use hyper_rustls; use hyper_timeout::TimeoutConnector; use serde::{Deserialize, Serialize}; -use serde_json; use std::time::Duration; use tokio::runtime::Builder; @@ -194,13 +191,11 @@ fn build_request_ex( body: Option, ) -> Result, Error> { // Checking only. 
Uri has issues with Builder 'uri()' method - let _ = url.parse::().map_err::(|e: InvalidUri| { - e.context(ErrorKind::Argument(format!("Invalid url {}", url))) - .into() - })?; + let _ = url + .parse::() + .map_err::(|e| Error::Argument(format!("Invalid url {}, {}", url, e)))?; let mut builder = Request::builder(); - if basic_auth_key.is_some() && api_secret.is_some() { let basic_auth = format!( "Basic {}", @@ -223,9 +218,7 @@ fn build_request_ex( None => Body::empty(), Some(json) => json.into(), }) - .map_err(|e| { - ErrorKind::RequestError(format!("Bad request {} {}: {}", method, url, e)).into() - }) + .map_err(|e| Error::RequestError(format!("Bad request {} {}: {}", method, url, e))) } pub fn create_post_request( @@ -237,7 +230,7 @@ where IN: Serialize, { let json = serde_json::to_string(input).map_err(|e| { - ErrorKind::Internal(format!("Post Request, Can't serialize data to JSON, {}", e)) + Error::Internal(format!("Post Request, Can't serialize data to JSON, {}", e)) })?; build_request(url, "POST", api_secret, Some(json)) } @@ -252,7 +245,7 @@ where IN: Serialize, { let json = serde_json::to_string(input).map_err(|e| { - ErrorKind::Internal(format!("Post Request, Can't serialize data to JSON, {}", e)) + Error::Internal(format!("Post Request, Can't serialize data to JSON, {}", e)) })?; build_request_ex(url, "POST", api_secret, basic_auth_key, Some(json)) } @@ -262,9 +255,8 @@ where for<'de> T: Deserialize<'de>, { let data = send_request(req, timeout)?; - serde_json::from_str(&data).map_err(|e| { - ErrorKind::ResponseError(format!("Cannot parse response: {}, {}", data, e)).into() - }) + serde_json::from_str(&data) + .map_err(|e| Error::ResponseError(format!("Cannot parse response: {}, {}", data, e))) } async fn handle_request_async(req: Request) -> Result @@ -273,18 +265,18 @@ where { let data = send_request_async(req, TimeOut::default()).await?; let ser = serde_json::from_str(&data) - .map_err(|e| ErrorKind::ResponseError(format!("Cannot parse response: {}, {}", data, e)))?; + .map_err(|e| Error::ResponseError(format!("Cannot parse response: {}, {}", data, e)))?; Ok(ser) } async fn send_request_async(req: Request, timeout: TimeOut) -> Result { let https = hyper_rustls::HttpsConnector::new(); - let mut connector = TimeoutConnector::new(https); let (connect, read, write) = ( Some(timeout.connect), Some(timeout.read), Some(timeout.write), ); + let mut connector = TimeoutConnector::new(https); connector.set_connect_timeout(connect); connector.set_read_timeout(read); connector.set_write_timeout(write); @@ -293,22 +285,21 @@ async fn send_request_async(req: Request, timeout: TimeOut) -> Result, timeout: TimeOut) -> Result +pub struct Foreign where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { pub peers: Weak, pub chain: Weak, - pub tx_pool: Weak>>, + pub tx_pool: Weak>>, pub sync_state: Weak, } -impl Foreign +impl Foreign where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { /// Create a new API instance with the chain, transaction pool, peers and `sync_state`. All subsequent /// API calls will operate on this instance of node API. @@ -68,7 +65,6 @@ where /// * `peers` - A non-owning reference of the peers. /// * `chain` - A non-owning reference of the chain. /// * `tx_pool` - A non-owning reference of the transaction pool. - /// * `peers` - A non-owning reference of the peers. /// * `sync_state` - A non-owning reference of the `sync_state`. 
/// /// # Returns @@ -78,7 +74,7 @@ where pub fn new( peers: Weak<p2p::Peers>, chain: Weak<chain::Chain>, - tx_pool: Weak<RwLock<pool::TransactionPool<B, P, V>>>, + tx_pool: Weak<RwLock<pool::TransactionPool<B, P>>>, sync_state: Weak<chain::SyncState>, ) -> Self { Foreign { @@ -151,6 +147,36 @@ where ) } + /// Returns a [`BlockListing`](types/struct.BlockListing.html) of available blocks + /// between `start_height` and `end_height`. + /// The method will query the database for blocks starting at the block height `start_height` + /// and continue until `end_height`, skipping any blocks that aren't available. + /// + /// # Arguments + /// * `start_height` - starting height to lookup. + /// * `end_height` - ending height to lookup. + /// * `max` - The max number of blocks to return. + /// Note this is overridden with BLOCK_TRANSFER_LIMIT if BLOCK_TRANSFER_LIMIT is exceeded + /// + /// # Returns + /// * Result Containing: + /// * A [`BlockListing`](types/struct.BlockListing.html) + /// * or [`Error`](struct.Error.html) if an error is encountered. + /// + + pub fn get_blocks( + &self, + start_height: u64, + end_height: u64, + max: u64, + include_proof: Option<bool>, + ) -> Result<BlockListing, Error> { + let block_handler = BlockHandler { + chain: self.chain.clone(), + }; + block_handler.get_blocks(start_height, end_height, max, include_proof) + } + /// Returns the node version and block header version (used by grin-wallet). /// /// # Returns @@ -358,21 +384,16 @@ where let pool_handler = PoolHandler { tx_pool: self.tx_pool.clone(), }; - match pool_handler.push_transaction(tx, fluff) { - Ok(_) => Ok(()), - Err(e) => { - warn!( - "Unable to push transaction {} into the pool, {}", - tx_hash, e - ); - Err(e) - } - } + pool_handler.push_transaction(tx, fluff).map_err(|e| { + warn!( - "Unable to push transaction {} into the pool, {}", - tx_hash, e - ); - e - }) } - /// Get TOR address on this node. Return none if TOR is not running. pub fn get_libp2p_peers(&self) -> Result<Libp2pPeers, Error> { - //get_server_onion_address() let libp2p_peers: Vec<String> = libp2p_connection::get_libp2p_connections() .iter() .map(|peer| peer.to_string()) @@ -380,8 +401,9 @@ where let node_peers = if let Some(peers) = self.peers.upgrade() { let connected_peers: Vec<String> = peers - .connected_peers() .iter() + .connected() + .into_iter() .map(|peer| peer.info.addr.tor_address().unwrap_or("".to_string())) .filter(|addr| !addr.is_empty()) .collect(); diff --git a/api/src/foreign_rpc.rs b/api/src/foreign_rpc.rs index 4e72166671..28066fdfab 100644 --- a/api/src/foreign_rpc.rs +++ b/api/src/foreign_rpc.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,16 +16,14 @@ use crate::core::core::hash::Hash; use crate::core::core::transaction::Transaction; -use crate::core::core::verifier_cache::VerifierCache; use crate::foreign::Foreign; use crate::pool::PoolEntry; use crate::pool::{BlockChain, PoolAdapter}; -use crate::rest::ErrorKind; +use crate::rest::Error; use crate::types::{ - BlockHeaderPrintable, BlockPrintable, LocatedTxKernel, OutputListing, OutputPrintable, Tip, - Version, + BlockHeaderPrintable, BlockListing, BlockPrintable, LocatedTxKernel, OutputListing, + OutputPrintable, Tip, Version, }; - use crate::{util, Libp2pMessages, Libp2pPeers}; /// Public definition used to generate Node jsonrpc api. @@ -128,7 +126,7 @@ pub trait ForeignRpc: Sync + Send { height: Option<u64>, hash: Option<String>, commit: Option<String>, - ) -> Result<BlockHeaderPrintable, ErrorKind>; + ) -> Result<BlockHeaderPrintable, Error>; /** Networked version of [Foreign::get_block](struct.Foreign.html#method.get_block).
@@ -226,7 +224,7 @@ pub trait ForeignRpc: Sync + Send { { "block_height": 374274, "commit": "09d33615563ba2d65acc2b295a024337166b9f520122d49730c73e8bfb43017610", - "merkle_proof": "00000000003e6f5e000000000000000f60fe09a7601a519d9be71135404580ad9de0964c70a7619b1731dca2cd8c1ae1dce9f544df671d63ff0e05b58f070cb48e163ca8f44fb4446c9fe1fc9cfef90e4b81e7119e8cf60acb9515363ecaea1ce20d2a8ea2f6f638f79a33a19d0d7b54cfff3daf8d21c243ba4ccd2c0fbda833edfa2506b1b326059d124e0c2e27cda90268e66f2dcc7576efac9ebbb831894d7776c191671c3294c2ca0af23201498a7f5ce98d5440ca24116b40ac98b1c5e38b28c8b560afc4f4684b81ab34f8cf162201040d4779195ba0e4967d1dd8184b579208e9ebebafa2f5004c51f5902a94bf268fd498f0247e8ba1a46efec8d88fa44d5ecb206fbe728ee56c24af36442eba416ea4d06e1ea267309bc2e6f961c57069e2525d17e78748254729d7fdec56720aa85fe6d89b2756a7eeed0a7aa5d13cfb874e3c65576ec8a15d6df17d7d4856653696b10fb9ec205f5e4d1c7a1f3e2dd2994b12eeed93e84776d8dcd8a5d78aecd4f96ae95c0b090d104adf2aa84f0a1fbd8d319fea5476d1a306b2800716e60b00115a5cca678617361c5a89660b4536c56254bc8dd7035d96f05de62b042d16acaeff57c111fdf243b859984063e3fcfdf40c4c4a52889706857a7c3e90e264f30f40cc87bd20e74689f14284bc5ea0a540950dfcc8d33c503477eb1c60", + "merkle_proof": null, "mmr_index": 4091742, "output_type": "Coinbase", "proof": "7adae7bcecf735c70eaa21e8fdce1d3c83d7b593f082fc29e16ff2c64ee5aaa15b682e5583257cf351de457dda8f877f4d8c1492af3aaf25cf5f496fce7ca54a0ef78cc61c4252c490386f3c69132960e9edc811add6415a6026d53d604414a5f4dd330a63fcbb005ba908a45b2fb1950a9529f793405832e57c89a36d3920715bc2d43db16a718ecd19aeb23428b5d3eeb89d73c28272a7f2b39b8923e777d8eb2c5ce9872353ba026dc79fdb093a6538868b4d184215afc29a9f90548f9c32aa663f9197fea1cadbb28d40d35ed79947b4b2b722e30e877a15aa2ecf95896faad173af2e2795b36ce342dfdacf13a2f4f273ab9927371f52913367d1d58246a0c35c8f0d2330fcddb9eec34c277b1cfdaf7639eec2095930b2adef17e0eb94f32e071bf1c607d2ef1757d66647477335188e5afc058c07fe0440a67804fbdd5d35d850391ead3e9c8a3136ae1c42a33d5b01fb2c6ec84a465df3f74358cbc28542036ae4ef3e63046fbd2bce6b12f829ed193fb51ea87790e88f1ea686d943c46714b076fb8c6be7c577bca5b2792e63d5f7b8f6018730b6f9ddaf5758a5fa6a3859d68b317ad4383719211e78f2ca832fd34c6a222a8488e40519179209ad1979f3095b7b7ba7f57e81c371989a4ace465149b0fe576d89473bc596c54cee663fbf78196e7eb31e4d56604c5226e9242a68bda95e1b45473c52f63fe865901839e82079a9935e25fe8d44e339484ba0a62d20857c6b3f15ab5c56b59c7523b63f86fa8977e3f4c35dc8b1c446c48a28947f9d9bd9992763404bcba95f94b45d643f07bb7c352bfad30809c741938b103a44218696206ca1e18f0b10b222d8685cc1ed89d5fdb0c7258b66486e35c0fd560a678864fd64c642b2b689a0c46d1be6b402265b7808cd61a95c2b4a4df280e3f0ec090197fb039d32538d05d3f0a082f5", @@ -246,7 +244,7 @@ pub trait ForeignRpc: Sync + Send { height: Option, hash: Option, commit: Option, - ) -> Result; + ) -> Result; /** Networked version with all parameters of [Foreign::get_block](struct.Node.html#method.get_block). @@ -367,7 +365,252 @@ pub trait ForeignRpc: Sync + Send { commit: Option, include_proof: Option, include_merkle_proof: Option, - ) -> Result; + ) -> Result; + + /** + Networked version of [Foreign::get_blocks](struct.Foreign.html#method.get_blocks). 
+ + # Json rpc example + + ``` + # grin_api::doctest_helper_json_rpc_foreign_assert_response!( + # r#" + { + "jsonrpc": "2.0", + "method": "get_blocks", + "params": [2299309, 2300309, 2, false], + "id": 1 + } + # "# + # , + # r#" + { + "id": 1, + "jsonrpc": "2.0", + "result": { + "Ok": { + "blocks": [ + { + "header": { + "cuckoo_solution": [ + 20354215, + 100524565, + 169529296, + 259818619, + 261952555, + 265003136, + 290685286, + 307792709, + 329993483, + 331550733, + 478902211, + 707186317, + 717277083, + 742312701, + 763869950, + 785680094, + 791217416, + 1156641404, + 1244452354, + 1277970471, + 1405106926, + 1663783361, + 1701259732, + 1795507572, + 1845900835, + 2060172013, + 2067055232, + 2169213199, + 2191128830, + 2253855427, + 2626425322, + 2678973678, + 2815586448, + 2921010487, + 3042894274, + 3103031603, + 3492595971, + 3603041347, + 3853538391, + 3974438280, + 4199558832, + 4262968379 + ], + "edge_bits": 32, + "hash": "0004331bb122685f12644e40b163e4557951b2b835ad2493502750ea787af7cc", + "height": 2299309, + "kernel_mmr_size": 8568165, + "kernel_root": "6b4adb9ee193ad043910b5a8c1bac0864ab99f57845101a3b422031bcf5c2ce1", + "nonce": 4185528505858938389, + "output_mmr_size": 13524183, + "output_root": "b642891741b56adaf7762813490d161377d0fbf7b47170d235beef33c25a4d77", + "prev_root": "a0ba3206b6a8089ef05690d40767c41cc0514eaa5031ebce1960a7cc2edcc211", + "previous": "000207548609a9007eacd7dfcdc8006252d6b1ad70864ea8ddebe4ca9e82bd74", + "range_proof_root": "d8cefda00f325fd9a1223454f23276b73d8a1d7c72ec74cdfb9bdf5c77a04dee", + "secondary_scaling": 0, + "timestamp": "2023-06-05T20:18:45+00:00", + "total_difficulty": 2072532663425232, + "total_kernel_offset": "b0a0c21326532b4a91c18d2355aedca4d8ed68b77db9882feb85da8120b4f533", + "version": 5 + }, + "inputs": [ + "092b140b1500812ac58ef68c17a2bbf2ec3531bcf0ce4dc32bbf8a29351d1784d7", + "083b72230921abeacd637dae8505233ab035c20dff1bfdab5ff5bb41b2f5238458" + ], + "kernels": [ + { + "excess": "08ab720dc374f099e6726e2dceada508a0331bb1f13b8a4e56afde83ff42f7a351", + "excess_sig": "6858120e9758d7587e27fd5dc9c26117a2ce0d5a7d871ce805e03eb494bfa1f86a27991865b3ab709064c43692433fd58f008c3bba2c88ad5f95a0c8ff3cf11f", + "features": "Plain", + "fee": 23500000, + "fee_shift": 0, + "lock_height": 0 + }, + { + "excess": "08d0a44b22952b03b29e3d88391102c281dcab4763def22cab65ed45e35b9078e8", + "excess_sig": "32f91d5671e334a87843a8b02c550c9e0fbdfe507ee62417cc123b5078d7884701a42e257357a1bed9dc4a8e07540b1629e9fa95a05c44adb5cb001c8fb777ee", + "features": "Coinbase", + "fee": 0, + "fee_shift": 0, + "lock_height": 0 + } + ], + "outputs": [ + { + "block_height": 2299309, + "commit": "0857c94df51dd226fa0c5920aae6d73d069603f973b2e06551698c6d39fdc2c192", + "merkle_proof": null, + "mmr_index": 13524176, + "output_type": "Coinbase", + "proof": null, + "proof_hash": "0937291a8a3c81cea4421fa0d0b291aacb5d46065cfd93747a15f58d99d781b6", + "spent": false + }, + { + "block_height": 2299309, + "commit": "08d4681b904695edee6e183cd40564ea0f5589b35d4d386da2eb980a6a92b1b307", + "merkle_proof": null, + "mmr_index": 0, + "output_type": "Transaction", + "proof": null, + "proof_hash": "41694ab6dcd9b1664ca28e79c3302144b99a4c1cb45d13c8728604c1d26e37bf", + "spent": true + }, + { + "block_height": 2299309, + "commit": "08255a260a65fc87cfd924780d896eaadb42468b0fe3ba6adeace378793b5d8172", + "merkle_proof": null, + "mmr_index": 13524182, + "output_type": "Transaction", + "proof": null, + "proof_hash": "58c77a5716ec4806dbddac64a83d6e4351b6eeffca391be1b11ec74aac0514dc", + "spent": false + } + ] + }, + 
{ + "header": { + "cuckoo_solution": [ + 898450, + 353949138, + 440882514, + 500154010, + 555236503, + 615120852, + 740100750, + 754668484, + 1056458121, + 1071299788, + 1130460099, + 1414281857, + 1444894533, + 1481124421, + 1551877341, + 1666859923, + 1682642953, + 1837365586, + 1845508478, + 1872787697, + 2040619654, + 2078971700, + 2104947318, + 2206501084, + 2233951742, + 2360961460, + 2378988856, + 2402500295, + 2438384422, + 2532261092, + 2879360933, + 3011869457, + 3023365279, + 3412207020, + 3509607650, + 3793770861, + 3850043972, + 3873426868, + 3965579806, + 4007877324, + 4090157476, + 4141650723 + ], + "edge_bits": 32, + "hash": "00006871e1fb8e7dddcc46343d7fbba14d08946c67b4568f3c2e98ec8c554ae9", + "height": 2299310, + "kernel_mmr_size": 8568166, + "kernel_root": "87184dc2f9efa6467ce797191c5d3ef086403d0103ba0b5adc6a71ed203a053c", + "nonce": 13726392224838330049, + "output_mmr_size": 13524184, + "output_root": "9570fbccef29609c5d3c68b07771bf4e7e80d0b139d9bd0215d1e9d1aaaed813", + "prev_root": "df1c67366b9cdd8deea570534a00a320748899e146288be067c0f402038e6aa0", + "previous": "0004331bb122685f12644e40b163e4557951b2b835ad2493502750ea787af7cc", + "range_proof_root": "987d7aff01e201269d4c6b00e885b9ed9c10f47205edd7727e3490aab953ca80", + "secondary_scaling": 0, + "timestamp": "2023-06-05T20:19:27+00:00", + "total_difficulty": 2072532872584027, + "total_kernel_offset": "b0a0c21326532b4a91c18d2355aedca4d8ed68b77db9882feb85da8120b4f533", + "version": 5 + }, + "inputs": [], + "kernels": [ + { + "excess": "08224a7946a75071b127af45496ddd3fc438db325cc35c3e4b0fdf23ed27703dd8", + "excess_sig": "d8c81bd8130c30016e38655a32b4c7a1f8fffda34a736dd8cdbcad05d28d09e3708d1f01e21276747eb03f28b9f5a834cb0ef8532330183df2b10d47ae7e68c6", + "features": "Coinbase", + "fee": 0, + "fee_shift": 0, + "lock_height": 0 + } + ], + "outputs": [ + { + "block_height": 2299310, + "commit": "09997d3c1eff72b7efa7bfb52032d713f5907755838c01a6e178a87a0ac170a279", + "merkle_proof": null, + "mmr_index": 13524184, + "output_type": "Coinbase", + "proof": null, + "proof_hash": "6c2c10af5c4b6d2bcf71084c2bd9685ae91427f03a8b78736ab27d6c5bc7e4db", + "spent": false + } + ] + } + ], + "last_retrieved_height": 2299310 + } + } + } + # "# + # ); + ``` + */ + fn get_blocks( + &self, + start_height: u64, + end_height: u64, + max: u64, + include_proof: Option, + ) -> Result; /** Networked version of [Foreign::get_version](struct.Foreign.html#method.get_version). @@ -400,7 +643,7 @@ pub trait ForeignRpc: Sync + Send { # ); ``` */ - fn get_version(&self) -> Result; + fn get_version(&self) -> Result; /** Networked version of [Foreign::get_tip](struct.Foreign.html#method.get_tip). @@ -435,7 +678,7 @@ pub trait ForeignRpc: Sync + Send { # ); ``` */ - fn get_tip(&self) -> Result; + fn get_tip(&self) -> Result; /** Networked version of [Foreign::get_kernel](struct.Foreign.html#method.get_kernel). @@ -478,7 +721,7 @@ pub trait ForeignRpc: Sync + Send { excess: String, min_height: Option, max_height: Option, - ) -> Result; + ) -> Result; /** Networked version of [Foreign::get_outputs](struct.Foreign.html#method.get_outputs). @@ -565,7 +808,7 @@ pub trait ForeignRpc: Sync + Send { end_height: Option, include_proof: Option, include_merkle_proof: Option, - ) -> Result, ErrorKind>; + ) -> Result, Error>; /** Networked version of [Foreign::get_unspent_outputs](struct.Foreign.html#method.get_unspent_outputs). 
@@ -626,7 +869,7 @@ pub trait ForeignRpc: Sync + Send { end_index: Option<u64>, max: u64, include_proof: Option<bool>, - ) -> Result<OutputListing, ErrorKind>; + ) -> Result<OutputListing, Error>; /** Networked version of [Foreign::get_pmmr_indices](struct.Foreign.html#method.get_pmmr_indices). @@ -663,7 +906,7 @@ pub trait ForeignRpc: Sync + Send { &self, start_block_height: u64, end_block_height: Option<u64>, - ) -> Result<OutputListing, ErrorKind>; + ) -> Result<OutputListing, Error>; /** Networked version of [Foreign::get_pool_size](struct.Foreign.html#method.get_pool_size). @@ -693,7 +936,7 @@ pub trait ForeignRpc: Sync + Send { # ); ``` */ - fn get_pool_size(&self) -> Result<usize, ErrorKind>; + fn get_pool_size(&self) -> Result<usize, Error>; /** Networked version of [Foreign::get_stempool_size](struct.Foreign.html#method.get_stempool_size). @@ -723,7 +966,7 @@ pub trait ForeignRpc: Sync + Send { # ); ``` */ - fn get_stempool_size(&self) -> Result<usize, ErrorKind>; + fn get_stempool_size(&self) -> Result<usize, Error>; /** Networked version of [Foreign::get_unconfirmed_transactions](struct.Foreign.html#method.get_unconfirmed_transactions). @@ -796,7 +1039,7 @@ pub trait ForeignRpc: Sync + Send { # ); ``` */ - fn get_unconfirmed_transactions(&self) -> Result<Vec<PoolEntry>, ErrorKind>; + fn get_unconfirmed_transactions(&self) -> Result<Vec<PoolEntry>, Error>; /** Networked version of [Foreign::push_transaction](struct.Foreign.html#method.push_transaction). @@ -861,7 +1104,7 @@ pub trait ForeignRpc: Sync + Send { # ); ``` */ - fn push_transaction(&self, tx: Transaction, fluff: Option<bool>) -> Result<(), ErrorKind>; + fn push_transaction(&self, tx: Transaction, fluff: Option<bool>) -> Result<(), Error>; /** Networked version of [Owner::get_libp2p_peers](struct.Owner.html#method.get_libp2p_peers). @@ -894,42 +1137,42 @@ pub trait ForeignRpc: Sync + Send { # ); ``` */ - fn get_libp2p_peers(&self) -> Result<Libp2pPeers, ErrorKind>; + fn get_libp2p_peers(&self) -> Result<Libp2pPeers, Error>; /** Networked version of [Owner::get_libp2p_messages](struct.Owner.html#method.get_libp2p_messages). // No example, because of the dynamic nature of the current time.
*/ - fn get_libp2p_messages(&self) -> Result; + fn get_libp2p_messages(&self) -> Result; } -impl ForeignRpc for Foreign +impl ForeignRpc for Foreign where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { fn get_header( &self, height: Option, hash: Option, commit: Option, - ) -> Result { + ) -> Result { let mut parsed_hash: Option = None; if let Some(hash) = hash { let vec = util::from_hex(&hash) - .map_err(|e| ErrorKind::Argument(format!("invalid block hash: {}", e)))?; + .map_err(|e| Error::Argument(format!("invalid block hash: {}", e)))?; parsed_hash = Some(Hash::from_vec(&vec)); } - Foreign::get_header(self, height, parsed_hash, commit).map_err(|e| e.kind().clone()) + Foreign::get_header(self, height, parsed_hash, commit) } + fn get_block( &self, height: Option, hash: Option, commit: Option, - ) -> Result { + ) -> Result { self.get_block_ex(height, hash, commit, None, None) } @@ -940,11 +1183,11 @@ where commit: Option, include_proof: Option, include_merkle_proof: Option, - ) -> Result { + ) -> Result { let mut parsed_hash: Option = None; if let Some(hash) = hash { let vec = util::from_hex(&hash) - .map_err(|e| ErrorKind::Argument(format!("invalid block hash: {}", e)))?; + .map_err(|e| Error::Argument(format!("invalid block hash: {}", e)))?; parsed_hash = Some(Hash::from_vec(&vec)); } Foreign::get_block( @@ -955,15 +1198,24 @@ where include_proof, include_merkle_proof, ) - .map_err(|e| e.kind().clone()) } - fn get_version(&self) -> Result { - Foreign::get_version(self).map_err(|e| e.kind().clone()) + fn get_blocks( + &self, + start_height: u64, + end_height: u64, + max: u64, + include_proof: Option, + ) -> Result { + Foreign::get_blocks(self, start_height, end_height, max, include_proof) + } + + fn get_version(&self) -> Result { + Foreign::get_version(self) } - fn get_tip(&self) -> Result { - Foreign::get_tip(self).map_err(|e| e.kind().clone()) + fn get_tip(&self) -> Result { + Foreign::get_tip(self) } fn get_kernel( @@ -971,8 +1223,8 @@ where excess: String, min_height: Option, max_height: Option, - ) -> Result { - Foreign::get_kernel(self, excess, min_height, max_height).map_err(|e| e.kind().clone()) + ) -> Result { + Foreign::get_kernel(self, excess, min_height, max_height) } fn get_outputs( @@ -982,7 +1234,7 @@ where end_height: Option, include_proof: Option, include_merkle_proof: Option, - ) -> Result, ErrorKind> { + ) -> Result, Error> { Foreign::get_outputs( self, commits, @@ -991,7 +1243,6 @@ where include_proof, include_merkle_proof, ) - .map_err(|e| e.kind().clone()) } fn get_unspent_outputs( @@ -1000,39 +1251,37 @@ where end_index: Option, max: u64, include_proof: Option, - ) -> Result { + ) -> Result { Foreign::get_unspent_outputs(self, start_index, end_index, max, include_proof) - .map_err(|e| e.kind().clone()) } fn get_pmmr_indices( &self, start_block_height: u64, end_block_height: Option, - ) -> Result { + ) -> Result { Foreign::get_pmmr_indices(self, start_block_height, end_block_height) - .map_err(|e| e.kind().clone()) } - fn get_pool_size(&self) -> Result { - Foreign::get_pool_size(self).map_err(|e| e.kind().clone()) + fn get_pool_size(&self) -> Result { + Foreign::get_pool_size(self) } - fn get_stempool_size(&self) -> Result { - Foreign::get_stempool_size(self).map_err(|e| e.kind().clone()) + fn get_stempool_size(&self) -> Result { + Foreign::get_stempool_size(self) } - fn get_unconfirmed_transactions(&self) -> Result, ErrorKind> { - Foreign::get_unconfirmed_transactions(self).map_err(|e| e.kind().clone()) + fn get_unconfirmed_transactions(&self) -> 
Result, Error> { + Foreign::get_unconfirmed_transactions(self) } - fn push_transaction(&self, tx: Transaction, fluff: Option) -> Result<(), ErrorKind> { - Foreign::push_transaction(self, tx, fluff).map_err(|e| e.kind().clone()) + fn push_transaction(&self, tx: Transaction, fluff: Option) -> Result<(), Error> { + Foreign::push_transaction(self, tx, fluff) } - fn get_libp2p_peers(&self) -> Result { - Foreign::get_libp2p_peers(self).map_err(|e| e.kind().clone()) + fn get_libp2p_peers(&self) -> Result { + Foreign::get_libp2p_peers(self) } - fn get_libp2p_messages(&self) -> Result { - Foreign::get_libp2p_messages(self).map_err(|e| e.kind().clone()) + fn get_libp2p_messages(&self) -> Result { + Foreign::get_libp2p_messages(self) } } diff --git a/api/src/handlers.rs b/api/src/handlers.rs index c96aec190b..f47129e1f3 100644 --- a/api/src/handlers.rs +++ b/api/src/handlers.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -42,7 +42,6 @@ use crate::auth::{ }; use crate::chain; use crate::chain::{Chain, SyncState}; -use crate::core::core::verifier_cache::VerifierCache; use crate::core::global; use crate::core::stratum; use crate::foreign::Foreign; @@ -52,7 +51,7 @@ use crate::owner_rpc::OwnerRpc; use crate::p2p; use crate::pool; use crate::pool::{BlockChain, PoolAdapter}; -use crate::rest::{ApiServer, Error, ErrorKind, TLSConfig}; +use crate::rest::{ApiServer, Error, TLSConfig}; use crate::router::ResponseFuture; use crate::router::{Router, RouterError}; use crate::stratum::Stratum; @@ -71,10 +70,10 @@ use std::thread; /// Listener version, providing same API but listening for requests on a /// port and wrapping the calls -pub fn node_apis( +pub fn node_apis( addr: &str, chain: Arc, - tx_pool: Arc>>, + tx_pool: Arc>>, peers: Arc, sync_state: Arc, api_secret: Option, @@ -88,10 +87,8 @@ pub fn node_apis( where B: BlockChain + 'static, P: PoolAdapter + 'static, - V: VerifierCache + 'static, { - // Manually build router when getting rid of v1 - //let mut router = Router::new(); + // Adding legacy owner v1 API let mut router = build_router( chain.clone(), tx_pool.clone(), @@ -115,7 +112,6 @@ where "Basic {}", to_base64(&format!("{}:{}", basic_auth_key, api_secret)) ); - let basic_auth_middleware = Arc::new(BasicAuthMiddleware::new( api_basic_auth, &MWC_BASIC_REALM, @@ -124,12 +120,12 @@ where router.add_middleware(basic_auth_middleware); } - let api_handler_v2 = OwnerAPIHandlerV2::new( + let api_handler = OwnerAPIHandlerV2::new( Arc::downgrade(&chain), Arc::downgrade(&peers), Arc::downgrade(&sync_state), ); - router.add_route("/v2/owner", Arc::new(api_handler_v2))?; + router.add_route("/v2/owner", Arc::new(api_handler))?; let stratum_handler_v2 = StratumAPIHandlerV2::new(stratum_ip_pool); router.add_route("/v2/stratum", Arc::new(stratum_handler_v2))?; @@ -140,7 +136,6 @@ where "Basic {}", to_base64(&format!("{}:{}", basic_auth_key, api_secret)) ); - let basic_auth_middleware = Arc::new(BasicAuthURIMiddleware::new( api_basic_auth, &MWC_FOREIGN_BASIC_REALM, @@ -149,13 +144,13 @@ where router.add_middleware(basic_auth_middleware); } - let api_handler_v2 = ForeignAPIHandlerV2::new( + let api_handler = ForeignAPIHandlerV2::new( Arc::downgrade(&peers), Arc::downgrade(&chain), Arc::downgrade(&tx_pool), Arc::downgrade(&sync_state), ); - router.add_route("/v2/foreign", Arc::new(api_handler_v2))?; + router.add_route("/v2/foreign", 
Arc::new(api_handler))?; let mut apis = ApiServer::new(); warn!("Starting HTTP Node APIs server at {}.", addr); @@ -182,7 +177,10 @@ where Ok(_) => Ok(()), Err(e) => { error!("HTTP API server failed to start. Err: {}", e); - Err(ErrorKind::Internal(format!("HTTP API server failed to start, {}", e)).into()) + Err(Error::Internal(format!( + "HTTP API server failed to start, {}", + e + ))) } } } @@ -241,29 +239,27 @@ impl crate::router::Handler for OwnerAPIHandlerV2 { } /// V2 API Handler/Wrapper for foreign functions -pub struct ForeignAPIHandlerV2 +pub struct ForeignAPIHandlerV2 where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { pub peers: Weak, pub chain: Weak, - pub tx_pool: Weak>>, + pub tx_pool: Weak>>, pub sync_state: Weak, } -impl ForeignAPIHandlerV2 +impl ForeignAPIHandlerV2 where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { /// Create a new foreign API handler for GET methods pub fn new( peers: Weak, chain: Weak, - tx_pool: Weak>>, + tx_pool: Weak>>, sync_state: Weak, ) -> Self { ForeignAPIHandlerV2 { @@ -275,11 +271,10 @@ where } } -impl crate::router::Handler for ForeignAPIHandlerV2 +impl crate::router::Handler for ForeignAPIHandlerV2 where B: BlockChain + 'static, P: PoolAdapter + 'static, - V: VerifierCache + 'static, { fn post(&self, req: Request) -> ResponseFuture { let api = Foreign::new( @@ -421,13 +416,13 @@ fn response>(status: StatusCode, text: T) -> Response { } // Legacy V1 router -#[deprecated( +/*#[deprecated( since = "4.0.0", note = "The V1 Node API will be removed in grin 5.0.0. Please migrate to the V2 API as soon as possible." -)] -pub fn build_router( +)]*/ +pub fn build_router( chain: Arc, - tx_pool: Arc>>, + tx_pool: Arc>>, peers: Arc, sync_state: Arc, allow_to_stop: bool, @@ -435,7 +430,6 @@ pub fn build_router( where B: BlockChain + 'static, P: PoolAdapter + 'static, - V: VerifierCache + 'static, { let route_list = vec![ "get blocks".to_string(), diff --git a/api/src/handlers/blocks_api.rs b/api/src/handlers/blocks_api.rs index cf0d391f25..a7db85f61b 100644 --- a/api/src/handlers/blocks_api.rs +++ b/api/src/handlers/blocks_api.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,6 +24,8 @@ use hyper::{Body, Request, StatusCode}; use regex::Regex; use std::sync::Weak; +pub const BLOCK_TRANSFER_LIMIT: u64 = 1000; + /// Gets block headers given either a hash or height or an output commit. /// GET /v1/headers/ /// GET /v1/headers/ @@ -43,20 +45,20 @@ impl HeaderHandler { match w(&self.chain)?.get_header_by_height(height) { Ok(header) => return Ok(BlockHeaderPrintable::from_header(&header)), Err(e) => { - return Err(ErrorKind::NotFound(format!( + return Err(Error::NotFound(format!( "Header for height {}, {}", height, e - )))? + ))) } } } check_block_param(&input)?; let vec = util::from_hex(&input) - .map_err(|e| ErrorKind::Argument(format!("invalid input: {}, {}", input, e)))?; + .map_err(|e| Error::Argument(format!("invalid input: {}, {}", input, e)))?; let h = Hash::from_vec(&vec); let header = w(&self.chain)? .get_block_header(&h) - .map_err(|e| ErrorKind::NotFound(format!("Block header for hash {}, {}", h, e)))?; + .map_err(|e| Error::NotFound(format!("Block header for hash {}, {}", h, e)))?; Ok(BlockHeaderPrintable::from_header(&header)) } @@ -64,17 +66,18 @@ impl HeaderHandler { let oid = match get_output(&self.chain, &commit_id)? 
{ Some((_, o)) => o, None => { - return Err( - ErrorKind::NotFound(format!("Commit id {} not found", commit_id)).into(), - ) + return Err(Error::NotFound(format!( + "Commit id {} not found", + commit_id + ))) } }; match w(&self.chain)?.get_header_for_output(oid.commitment()) { Ok(header) => Ok(BlockHeaderPrintable::from_header(&header)), - Err(e) => Err(ErrorKind::NotFound(format!( + Err(e) => Err(Error::NotFound(format!( "Header for output {}, {}", commit_id, e - )))?, + ))), } } @@ -82,8 +85,8 @@ impl HeaderHandler { let chain = w(&self.chain)?; let header = chain .get_block_header(h) - .map_err(|e| ErrorKind::NotFound(format!("Block header for hash {}, {}", h, e)))?; - return Ok(BlockHeaderPrintable::from_header(&header)); + .map_err(|e| Error::NotFound(format!("Block header for hash {}, {}", h, e)))?; + Ok(BlockHeaderPrintable::from_header(&header)) } // Try to get hash from height, hash or output commit @@ -97,10 +100,10 @@ impl HeaderHandler { match w(&self.chain)?.get_header_by_height(height) { Ok(header) => return Ok(header.hash()), Err(e) => { - return Err(ErrorKind::NotFound(format!( + return Err(Error::NotFound(format!( "Header for height {}, {}", height, e - )))? + ))) } } } @@ -110,24 +113,22 @@ impl HeaderHandler { if let Some(commit) = commit { let oid = match get_output_v2(&self.chain, &commit, false, false)? { Some((_, o)) => o, - None => { - return Err(ErrorKind::NotFound(format!("Output {} not found", commit)).into()) - } + None => return Err(Error::NotFound(format!("Output {} not found", commit))), }; match w(&self.chain)?.get_header_for_output(oid.commitment()) { Ok(header) => return Ok(header.hash()), Err(e) => { - return Err(ErrorKind::NotFound(format!( + return Err(Error::NotFound(format!( "Header for output {:?}, {}", oid, e - )))? + ))) } } } - return Err(ErrorKind::Argument(format!( + Err(Error::Argument(format!( "not a valid hash {:?}, height {:?} or output commit {:?}", hash, height, commit - )))?; + ))) } } @@ -162,26 +163,95 @@ impl BlockHandler { let chain = w(&self.chain)?; let block = chain .get_block(h) - .map_err(|e| ErrorKind::NotFound(format!("Block for hash {}, {}", h, e)))?; + .map_err(|e| Error::NotFound(format!("Block for hash {}, {}", h, e)))?; BlockPrintable::from_block(&block, &chain, include_proof, include_merkle_proof).map_err( - |e| { - ErrorKind::Internal(format!("chain error, broken block for hash {}. {}", h, e)) - .into() - }, + |e| Error::Internal(format!("chain error, broken block for hash {}. 
{}", h, e)), ) } + pub fn get_blocks( + &self, + mut start_height: u64, + end_height: u64, + mut max: u64, + include_proof: Option, + ) -> Result { + // set a limit here + if max > BLOCK_TRANSFER_LIMIT { + max = BLOCK_TRANSFER_LIMIT; + } + let tail_height = self.get_tail_height()?; + let orig_start_height = start_height; + + if start_height < tail_height { + start_height = tail_height; + } + + // In full archive node, tail will be set to 1, so include genesis block as well + // for consistency + if start_height == 1 && orig_start_height == 0 { + start_height = 0; + } + + let mut result_set = BlockListing { + last_retrieved_height: 0, + blocks: vec![], + }; + let mut block_count = 0; + for h in start_height..=end_height { + result_set.last_retrieved_height = h; + + let hash = match self.parse_inputs(Some(h), None, None) { + Err(e) => { + if let Error::NotFound(_) = e { + continue; + } else { + return Err(e); + } + } + Ok(h) => h, + }; + + let block_res = self.get_block(&hash, include_proof == Some(true), false); + + match block_res { + Err(e) => { + if let Error::NotFound(_) = e { + continue; + } else { + return Err(e); + } + } + Ok(b) => { + block_count += 1; + result_set.blocks.push(b); + } + } + if block_count == max { + break; + } + } + Ok(result_set) + } + + pub fn get_tail_height(&self) -> Result { + let chain = w(&self.chain)?; + Ok(chain + .get_tail() + .map_err(|e| Error::NotFound(format!("Tail not found, {}", e)))? + .height) + } + fn get_compact_block(&self, h: &Hash) -> Result { let chain = w(&self.chain)?; let block = chain .get_block(h) - .map_err(|e| ErrorKind::NotFound(format!("Block for hash {}, {}", h, e)))?; + .map_err(|e| Error::NotFound(format!("Block for hash {}, {}", h, e)))?; CompactBlockPrintable::from_compact_block(&block.into(), &chain).map_err(|e| { - ErrorKind::Internal(format!( + Error::Internal(format!( "chain error, broken compact block for hash {}, {}", h, e )) - .into() }) } @@ -191,16 +261,16 @@ impl BlockHandler { match w(&self.chain)?.get_header_by_height(height) { Ok(header) => return Ok(header.hash()), Err(e) => { - return Err(ErrorKind::NotFound(format!( + return Err(Error::NotFound(format!( "Header for height {}, {}", height, e - )))? + ))) } } } check_block_param(&input)?; let vec = util::from_hex(&input) - .map_err(|e| ErrorKind::Argument(format!("invalid input {}, {}", input, e)))?; + .map_err(|e| Error::Argument(format!("invalid input {}, {}", input, e)))?; Ok(Hash::from_vec(&vec)) } @@ -215,10 +285,10 @@ impl BlockHandler { match w(&self.chain)?.get_header_by_height(height) { Ok(header) => return Ok(header.hash()), Err(e) => { - return Err(ErrorKind::NotFound(format!( + return Err(Error::NotFound(format!( "Header for height {}, {}", height, e - )))? + ))) } } } @@ -228,22 +298,22 @@ impl BlockHandler { if let Some(commit) = commit { let oid = match get_output_v2(&self.chain, &commit, false, false)? { Some((_, o)) => o, - None => return Err(ErrorKind::NotFound(format!("Output {}", commit)).into()), + None => return Err(Error::NotFound(format!("Output {}", commit)).into()), }; match w(&self.chain)?.get_header_for_output(oid.commitment()) { Ok(header) => return Ok(header.hash()), Err(e) => { - return Err(ErrorKind::NotFound(format!( + return Err(Error::NotFound(format!( "Header for output {:?}, {}", oid, e - )))? 
+ ))) } } } - return Err(ErrorKind::Argument(format!( + Err(Error::Argument(format!( "not a valid hash {:?}, height {:?} or output commit {:?}", hash, height, commit - )))?; + ))) } } @@ -252,10 +322,10 @@ fn check_block_param(input: &str) -> Result<(), Error> { static ref RE: Regex = Regex::new(r"[0-9a-fA-F]{64}").unwrap(); } if !RE.is_match(&input) { - return Err(ErrorKind::Argument(format!( + return Err(Error::Argument(format!( "Not a valid hash or height value {}", input - )))?; + ))); } Ok(()) } @@ -267,7 +337,7 @@ impl Handler for BlockHandler { Err(e) => { return response( StatusCode::BAD_REQUEST, - format!("failed to parse request: {}", e), + format!("failed to parse input: {}", e), ); } Ok(h) => h, diff --git a/api/src/handlers/chain_api.rs b/api/src/handlers/chain_api.rs index 09af5b8633..55ebc0eff5 100644 --- a/api/src/handlers/chain_api.rs +++ b/api/src/handlers/chain_api.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ use super::utils::{get_output, get_output_v2, w}; use crate::chain; -use crate::core::core::hash::Hashed; +use crate::core::core::hash::{Hash, Hashed}; use crate::rest::*; use crate::router::{Handler, ResponseFuture}; use crate::types::*; @@ -34,7 +34,7 @@ impl ChainHandler { pub fn get_tip(&self) -> Result { let head = w(&self.chain)? .head() - .map_err(|e| ErrorKind::Internal(format!("can't get tip: {}", e)))?; + .map_err(|e| Error::Internal(format!("can't get head: {}", e)))?; Ok(Tip::from_tip(head)) } } @@ -53,9 +53,12 @@ pub struct ChainValidationHandler { impl ChainValidationHandler { pub fn validate_chain(&self, fast_validation: bool) -> Result<(), Error> { - w(&self.chain)? - .validate(fast_validation) - .map_err(|e| ErrorKind::Internal(format!("chain fast validation error. {}", e)).into()) + w(&self.chain)?.validate(fast_validation).map_err(|e| { + Error::Internal(format!( + "chain fast validation ({}) error: {}", + fast_validation, e + )) + }) } } @@ -71,6 +74,29 @@ impl Handler for ChainValidationHandler { } } +pub struct ChainResetHandler { + pub chain: Weak, + pub sync_state: Weak, +} + +impl ChainResetHandler { + pub fn reset_chain_head(&self, hash: Hash) -> Result<(), Error> { + let chain = w(&self.chain)?; + let header = chain.get_block_header(&hash)?; + chain.reset_chain_head(&header, true)?; + + // Reset the sync status and clear out any sync error. + w(&self.sync_state)?.reset(); + Ok(()) + } + + pub fn invalidate_header(&self, hash: Hash) -> Result<(), Error> { + let chain = w(&self.chain)?; + chain.invalidate_header(hash)?; + Ok(()) + } +} + /// Chain compaction handler. Trigger a compaction of the chain state to regain /// storage space. /// POST /v1/chain/compact @@ -82,7 +108,7 @@ impl ChainCompactHandler { pub fn compact_chain(&self) -> Result<(), Error> { w(&self.chain)? 
.compact() - .map_err(|e| ErrorKind::Internal(format!("compact chain error {}", e)).into()) + .map_err(|e| Error::Internal(format!("compact chain error {}", e))) } } @@ -120,11 +146,10 @@ impl OutputHandler { // First check the commits length for commit in &commits { if commit.len() != 66 { - return Err(ErrorKind::RequestError(format!( + return Err(Error::RequestError(format!( "invalid commit length for {}, expected length 66", commit - )) - .into()); + ))); } } for commit in commits { @@ -179,7 +204,7 @@ impl OutputHandler { let outputs = chain .unspent_outputs_by_pmmr_index(start_index, max, end_index) .map_err(|e| { - ErrorKind::NotFound(format!( + Error::NotFound(format!( "Unspent outputs for PMMR {}-{:?}, {}", start_index, end_index, e )) @@ -200,7 +225,7 @@ impl OutputHandler { ) }) .collect::, _>>() - .map_err(|e| ErrorKind::Internal(format!("chain error, {}", e)))?, + .map_err(|e| Error::Internal(format!("chain error, {}", e)))?, }; Ok(out) } @@ -239,15 +264,13 @@ impl OutputHandler { ) -> Result { let header = w(&self.chain)? .get_header_by_height(block_height) - .map_err(|e| { - ErrorKind::NotFound(format!("Header at height {}, {}", block_height, e)) - })?; + .map_err(|e| Error::NotFound(format!("Header at height {}, {}", block_height, e)))?; // TODO - possible to compact away blocks we care about // in the period between accepting the block and refreshing the wallet let chain = w(&self.chain)?; let block = chain.get_block(&header.hash()).map_err(|e| { - ErrorKind::NotFound(format!( + Error::NotFound(format!( "Block at height {} for hash {}, {}", block_height, header.hash(), @@ -262,12 +285,10 @@ impl OutputHandler { OutputPrintable::from_output(output, &chain, Some(&header), include_proof, true) }) .collect::, _>>() - .map_err(|e| { - ErrorKind::Internal(format!("chain read outputs from block error, {}", e)) - })?; + .map_err(|e| Error::Internal(format!("chain read outputs from block error, {}", e)))?; Ok(BlockOutputs { - header: BlockHeaderInfo::from_header(&header), + header: BlockHeaderDifficultyInfo::from_header(&header), outputs: outputs, }) } @@ -281,15 +302,13 @@ impl OutputHandler { ) -> Result, Error> { let header = w(&self.chain)? 
.get_header_by_height(block_height) - .map_err(|e| { - ErrorKind::NotFound(format!("Header at height {}, {}", block_height, e)) - })?; + .map_err(|e| Error::NotFound(format!("Header at height {}, {}", block_height, e)))?; // TODO - possible to compact away blocks we care about // in the period between accepting the block and refreshing the wallet let chain = w(&self.chain)?; let block = chain.get_block(&header.hash()).map_err(|e| { - ErrorKind::NotFound(format!( + Error::NotFound(format!( "Block at height {} for hash {}, {}", block_height, header.hash(), @@ -310,9 +329,7 @@ impl OutputHandler { ) }) .collect::, _>>() - .map_err(|e| { - ErrorKind::Internal(format!("chain read outputs from block error, {}", e)) - })?; + .map_err(|e| Error::Internal(format!("chain read outputs from block error, {}", e)))?; Ok(outputs) } @@ -412,17 +429,15 @@ impl KernelHandler { .trim_end_matches('/') .rsplit('/') .next() - .ok_or_else(|| ErrorKind::RequestError("missing excess".into()))?; - let excess_v = util::from_hex(excess_s).map_err(|e| { - ErrorKind::RequestError(format!("invalid excess hex {}, {}", excess_s, e)) - })?; + .ok_or_else(|| Error::RequestError("missing excess".into()))?; + let excess_v = util::from_hex(excess_s) + .map_err(|e| Error::RequestError(format!("invalid excess hex {}, {}", excess_s, e)))?; if excess_v.len() != 33 { - return Err(ErrorKind::RequestError(format!( + return Err(Error::RequestError(format!( "invalid excess {}, get length {}, expected 33", excess_s, excess_v.len() - )) - .into()); + ))); } let excess = Commitment::from_vec(excess_v); @@ -436,7 +451,7 @@ impl KernelHandler { let params = QueryParams::from(q); if let Some(hs) = params.get("min_height") { let h = hs.parse().map_err(|e| { - ErrorKind::RequestError(format!( + Error::RequestError(format!( "invalid parameter 'min_height' value {}, {}", hs, e )) @@ -446,7 +461,7 @@ impl KernelHandler { } if let Some(hs) = params.get("max_height") { let h = hs.parse().map_err(|e| { - ErrorKind::RequestError(format!( + Error::RequestError(format!( "invalid parameter 'max_height' value {}, {}", hs, e )) @@ -454,7 +469,7 @@ impl KernelHandler { // Default is current head let head_height = chain .head() - .map_err(|e| ErrorKind::Internal(format!("Unable to get a chain head, {}", e)))? + .map_err(|e| Error::Internal(format!("Unable to get a chain head, {}", e)))? 
.height; max_height = if h >= head_height { None } else { Some(h) }; } @@ -463,7 +478,7 @@ impl KernelHandler { let kernel = chain .get_kernel_height(&excess, min_height, max_height) .map_err(|e| { - ErrorKind::Internal(format!( + Error::Internal(format!( "Unable to get a height for the excess {}, {}", excess_s, e )) @@ -482,16 +497,14 @@ impl KernelHandler { min_height: Option, max_height: Option, ) -> Result { - let excess = util::from_hex(&excess_s).map_err(|e| { - ErrorKind::RequestError(format!("invalid excess hex {}, {}", excess_s, e)) - })?; + let excess = util::from_hex(&excess_s) + .map_err(|e| Error::RequestError(format!("invalid excess hex {}, {}", excess_s, e)))?; if excess.len() != 33 { - return Err(ErrorKind::RequestError(format!( + return Err(Error::RequestError(format!( "invalid excess {}, get length {}, expected 33", excess_s, excess.len() - )) - .into()); + ))); } let excess = Commitment::from_vec(excess); @@ -499,7 +512,7 @@ impl KernelHandler { let kernel = chain .get_kernel_height(&excess, min_height, max_height) .map_err(|e| { - ErrorKind::Internal(format!( + Error::Internal(format!( "Unable to get a height for excess {}, {}", excess_s, e )) @@ -509,12 +522,7 @@ impl KernelHandler { height, mmr_index, }); - kernel.ok_or_else(|| { - Error::from(ErrorKind::NotFound(format!( - "kernel value for excess {}", - excess_s - ))) - }) + kernel.ok_or_else(|| Error::NotFound(format!("kernel value for excess {}", excess_s))) } } diff --git a/api/src/handlers/peers_api.rs b/api/src/handlers/peers_api.rs index 5118212b6f..dc50cc055d 100644 --- a/api/src/handlers/peers_api.rs +++ b/api/src/handlers/peers_api.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -30,7 +30,7 @@ pub struct PeersAllHandler { impl Handler for PeersAllHandler { fn get(&self, _req: Request) -> ResponseFuture { - let peers = &w_fut!(&self.peers).all_peers(); + let peers = &w_fut!(&self.peers).all_peer_data(); json_response_pretty(&peers) } } @@ -42,8 +42,9 @@ pub struct PeersConnectedHandler { impl PeersConnectedHandler { pub fn get_connected_peers(&self) -> Result, Error> { let peers = w(&self.peers)? - .connected_peers() .iter() + .connected() + .into_iter() .map(|p| p.info.clone().into()) .collect::>(); @@ -83,8 +84,9 @@ impl PeersConnectedHandler { impl Handler for PeersConnectedHandler { fn get(&self, _req: Request) -> ResponseFuture { let peers: Vec = w_fut!(&self.peers) - .connected_peers() .iter() + .connected() + .into_iter() .map(|p| p.info.clone().into()) .collect(); @@ -133,14 +135,14 @@ impl PeerHandler { if let Some(addr) = addr { let peer_addr = PeerAddr::Ip(addr); let peer_data: PeerData = w(&self.peers)?.get_peer(peer_addr.clone()).map_err(|e| { - ErrorKind::Internal(format!( + Error::Internal(format!( "Unable to get peer for address {}, {}", peer_addr, e )) })?; return Ok(vec![peer_data]); } - let peers = w(&self.peers)?.all_peers(); + let peers = w(&self.peers)?.all_peer_data(); Ok(peers) } @@ -149,22 +151,20 @@ impl PeerHandler { w(&self.peers)? 
.ban_peer(peer_addr.clone(), ReasonForBan::ManualBan) .map_err(|e| { - ErrorKind::Internal(format!( + Error::Internal(format!( "Unable to ban peer for address {}, {}", peer_addr, e )) - .into() }) } pub fn unban_peer(&self, addr: SocketAddr) -> Result<(), Error> { let peer_addr = PeerAddr::Ip(addr); w(&self.peers)?.unban_peer(peer_addr.clone()).map_err(|e| { - ErrorKind::Internal(format!( + Error::Internal(format!( "Unable to unban peer for address {}, {}", peer_addr, e )) - .into() }) } } @@ -224,29 +224,23 @@ impl Handler for PeerHandler { match command { "ban" => match w_fut!(&self.peers).ban_peer(addr.clone(), ReasonForBan::ManualBan) { - Ok(_) => return response(StatusCode::OK, "{}"), - Err(e) => { - return response( - StatusCode::INTERNAL_SERVER_ERROR, - format!("ban for peer {} failed, {:?}", addr, e), - ) - } + Ok(_) => response(StatusCode::OK, "{}"), + Err(e) => response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("ban for peer {} failed, {:?}", addr, e), + ), }, "unban" => match w_fut!(&self.peers).unban_peer(addr.clone()) { - Ok(_) => return response(StatusCode::OK, "{}"), - Err(e) => { - return response( - StatusCode::INTERNAL_SERVER_ERROR, - format!("unban for peer {} failed, {:?}", addr, e), - ) - } + Ok(_) => response(StatusCode::OK, "{}"), + Err(e) => response( + StatusCode::INTERNAL_SERVER_ERROR, + format!("unban for peer {} failed, {:?}", addr, e), + ), }, - _ => { - return response( - StatusCode::BAD_REQUEST, - format!("invalid command {}", command), - ) - } - }; + _ => response( + StatusCode::BAD_REQUEST, + format!("invalid command {}", command), + ), + } } } diff --git a/api/src/handlers/pool_api.rs b/api/src/handlers/pool_api.rs index a9d5630e7d..33c97debe3 100644 --- a/api/src/handlers/pool_api.rs +++ b/api/src/handlers/pool_api.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,9 +14,8 @@ use super::utils::w; use crate::core::core::hash::Hashed; -use crate::core::core::verifier_cache::VerifierCache; use crate::core::core::Transaction; -use crate::core::ser::{self, ProtocolVersion}; +use crate::core::ser::{self, DeserializationMode, ProtocolVersion}; use crate::pool::{self, BlockChain, PoolAdapter, PoolEntry}; use crate::rest::*; use crate::router::{Handler, ResponseFuture}; @@ -29,20 +28,18 @@ use std::sync::Weak; /// Get basic information about the transaction pool. 
/// GET /v1/pool -pub struct PoolInfoHandler +pub struct PoolInfoHandler where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { - pub tx_pool: Weak>>, + pub tx_pool: Weak>>, } -impl Handler for PoolInfoHandler +impl Handler for PoolInfoHandler where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { fn get(&self, _req: Request) -> ResponseFuture { let pool_arc = w_fut!(&self.tx_pool); @@ -54,20 +51,18 @@ where } } -pub struct PoolHandler +pub struct PoolHandler where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { - pub tx_pool: Weak>>, + pub tx_pool: Weak>>, } -impl PoolHandler +impl PoolHandler where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { pub fn get_pool_size(&self) -> Result { let pool_arc = w(&self.tx_pool)?; @@ -103,10 +98,10 @@ where let header = tx_pool .blockchain .chain_head() - .map_err(|e| ErrorKind::Internal(format!("Failed to get chain head, {}", e)))?; + .map_err(|e| Error::Internal(format!("Failed to get chain head, {}", e)))?; tx_pool .add_to_pool(source, tx, !fluff.unwrap_or(false), &header) - .map_err(|e| ErrorKind::Internal(format!("Failed to update pool, {}", e)))?; + .map_err(|e| Error::Internal(format!("Failed to update pool, {}", e)))?; info!("transaction {} was added to the pool", tx_hash); @@ -121,23 +116,21 @@ struct TxWrapper { /// Push new transaction to our local transaction pool. /// POST /v1/pool/push_tx -pub struct PoolPushHandler +pub struct PoolPushHandler where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { - pub tx_pool: Weak>>, + pub tx_pool: Weak>>, } -async fn update_pool( - pool: Weak>>, +async fn update_pool( + pool: Weak>>, req: Request, ) -> Result<(), Error> where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { let pool = w(&pool)?; let params = QueryParams::from(req.uri().query()); @@ -145,7 +138,7 @@ where let wrapper: TxWrapper = parse_body(req).await?; let tx_bin = util::from_hex(&wrapper.tx_hex).map_err(|e| { - ErrorKind::RequestError(format!( + Error::RequestError(format!( "Unable to decode transaction hex {}, {}", wrapper.tx_hex, e )) @@ -153,12 +146,15 @@ where // All wallet api interaction explicitly uses protocol version 1 for now. 
let version = ProtocolVersion(1); - let tx: Transaction = ser::deserialize(&mut &tx_bin[..], version).map_err(|e| { - ErrorKind::RequestError(format!( - "Unable to deserialize transaction from binary {:?}, {}", - tx_bin, e - )) - })?; + let tx: Transaction = + ser::deserialize(&mut &tx_bin[..], version, DeserializationMode::default()).map_err( + |e| { + Error::RequestError(format!( + "Unable to deserialize transaction from binary {:?}, {}", + tx_bin, e + )) + }, + )?; let source = pool::TxSource::PushApi; info!( @@ -174,18 +170,17 @@ where let header = tx_pool .blockchain .chain_head() - .map_err(|e| ErrorKind::Internal(format!("Failed to get chain head, {}", e)))?; + .map_err(|e| Error::Internal(format!("Failed to get chain head: {}", e)))?; tx_pool .add_to_pool(source, tx, !fluff, &header) - .map_err(|e| ErrorKind::Internal(format!("Failed to update pool, {}", e)))?; + .map_err(|e| Error::Internal(format!("Failed to update pool: {}", e)))?; Ok(()) } -impl Handler for PoolPushHandler +impl Handler for PoolPushHandler where B: BlockChain + 'static, P: PoolAdapter + 'static, - V: VerifierCache + 'static, { fn post(&self, req: Request) -> ResponseFuture { let pool = self.tx_pool.clone(); diff --git a/api/src/handlers/server_api.rs b/api/src/handlers/server_api.rs index 8155807c5f..76338ab06d 100644 --- a/api/src/handlers/server_api.rs +++ b/api/src/handlers/server_api.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,6 +22,7 @@ use crate::web::*; use grin_core::global; use hyper::{Body, Request, StatusCode}; use serde_json::json; +use std::convert::TryInto; use std::sync::atomic::Ordering; use std::sync::Weak; @@ -52,12 +53,17 @@ impl StatusHandler { pub fn get_status(&self) -> Result { let head = w(&self.chain)? .head() - .map_err(|e| ErrorKind::Internal(format!("Unable to get chain tip, {}", e)))?; + .map_err(|e| Error::Internal(format!("Unable to get chain tip, {}", e)))?; let sync_status = w(&self.sync_state)?.status(); let (api_sync_status, api_sync_info) = sync_status_to_api(sync_status); Ok(Status::from_tip_and_peers( head, - w(&self.peers)?.peer_count(), + w(&self.peers)? + .iter() + .connected() + .count() + .try_into() + .unwrap(), api_sync_status, api_sync_info, )) @@ -98,8 +104,6 @@ impl Handler for StatusHandler { global::get_server_running_controller().store(false, Ordering::SeqCst); } } - - // stop the server... result_to_response(Ok(StatusOutput::new(&processed))) } else { response( @@ -116,11 +120,12 @@ fn sync_status_to_api(sync_status: SyncStatus) -> (String, Option ("no_sync".to_string(), None), SyncStatus::AwaitingPeers(_) => ("awaiting_peers".to_string(), None), SyncStatus::HeaderSync { - current_height, + sync_head, highest_height, + .. 
} => ( "header_sync".to_string(), - Some(json!({ "current_height": current_height, "highest_height": highest_height })), + Some(json!({ "current_height": sync_head.height, "highest_height": highest_height })), ), SyncStatus::TxHashsetDownload(stats) => ( "txhashset_download".to_string(), diff --git a/api/src/handlers/transactions_api.rs b/api/src/handlers/transactions_api.rs index 4052acf6a0..401271c4af 100644 --- a/api/src/handlers/transactions_api.rs +++ b/api/src/handlers/transactions_api.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -47,10 +47,8 @@ impl TxHashSetHandler { // gets roots fn get_roots(&self) -> Result { let chain = w(&self.chain)?; - let res = TxHashSet::from_head(&chain).map_err(|e| { - ErrorKind::Internal(format!("failed to read roots from txhashset, {}", e)) - })?; - Ok(res) + TxHashSet::from_head(&chain) + .map_err(|e| Error::Internal(format!("failed to read roots from txhashset: {}", e))) } // gets last n outputs inserted in to the tree @@ -86,7 +84,7 @@ impl TxHashSetHandler { let outputs = chain .unspent_outputs_by_pmmr_index(start_index, max, end_index) .map_err(|e| { - ErrorKind::NotFound(format!( + Error::NotFound(format!( "Unspent output for PMMR {}-{:?}, {}", start_index, end_index, e )) @@ -99,7 +97,7 @@ impl TxHashSetHandler { .iter() .map(|x| OutputPrintable::from_output(x, &chain, None, true, true)) .collect::, _>>() - .map_err(|e| ErrorKind::Internal(format!("chain error, {}", e)))?, + .map_err(|e| Error::Internal(format!("chain error: {}", e)))?, }; Ok(out) } @@ -114,7 +112,7 @@ impl TxHashSetHandler { let range = chain .block_height_range_to_pmmr_indices(start_block_height, end_block_height) .map_err(|e| { - ErrorKind::NotFound(format!( + Error::NotFound(format!( "Block PMMR range for heights {}-{:?}, {}", start_block_height, end_block_height, e )) @@ -131,17 +129,17 @@ impl TxHashSetHandler { // (to avoid having to create a new type to pass around) fn get_merkle_proof_for_output(&self, id: &str) -> Result { let c = util::from_hex(id) - .map_err(|e| ErrorKind::Argument(format!("Not a valid commitment {}, {}", id, e)))?; + .map_err(|e| Error::Argument(format!("Not a valid commitment: {}, {}", id, e)))?; let commit = Commitment::from_vec(c); let chain = w(&self.chain)?; let output_pos = chain.get_output_pos(&commit).map_err(|e| { - ErrorKind::NotFound(format!( + Error::NotFound(format!( "Unable to get a MMR position for commit {}, {}", id, e )) })?; let merkle_proof = chain::Chain::get_merkle_proof_for_pos(&chain, commit).map_err(|e| { - ErrorKind::NotFound(format!( + Error::NotFound(format!( "Unable to get a merkle proof for commit {}, {}", id, e )) diff --git a/api/src/handlers/utils.rs b/api/src/handlers/utils.rs index cb308640ed..85b2b39ddb 100644 --- a/api/src/handlers/utils.rs +++ b/api/src/handlers/utils.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ use std::sync::{Arc, Weak}; // boilerplate of dealing with `Weak`. 
pub fn w(weak: &Weak) -> Result, Error> { weak.upgrade() - .ok_or_else(|| ErrorKind::Internal("failed to upgrade weak reference".to_owned()).into()) + .ok_or_else(|| Error::Internal("failed to upgrade weak reference".to_owned())) } /// Internal function to retrieves an output by a given commitment @@ -35,7 +35,7 @@ fn get_unspent( id: &str, ) -> Result, Error> { let c = util::from_hex(id) - .map_err(|_| ErrorKind::Argument(format!("Not a valid commitment: {}", id)))?; + .map_err(|_| Error::Argument(format!("Not a valid commitment: {}", id)))?; let commit = Commitment::from_vec(c); let res = chain.get_unspent(commit)?; Ok(res) @@ -71,7 +71,7 @@ pub fn get_output_v2( None => return Ok(None), }; - let output = chain.get_unspent_output_at(pos.pos)?; + let output = chain.get_unspent_output_at(pos.pos - 1)?; let header = if include_merkle_proof && output.is_coinbase() { chain.get_header_by_height(pos.height).ok() } else { diff --git a/api/src/handlers/version_api.rs b/api/src/handlers/version_api.rs index 0f71f7e75d..ddd2dbce13 100644 --- a/api/src/handlers/version_api.rs +++ b/api/src/handlers/version_api.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -33,7 +33,7 @@ impl VersionHandler { pub fn get_version(&self) -> Result { let head = w(&self.chain)? .head_header() - .map_err(|e| ErrorKind::Internal(format!("can't get chain tip, {}", e)))?; + .map_err(|e| Error::Internal(format!("can't get head: {}", e)))?; Ok(Version { node_version: CRATE_VERSION.to_owned(), diff --git a/api/src/json_rpc.rs b/api/src/json_rpc.rs index e9e1d852a3..6728ca7056 100644 --- a/api/src/json_rpc.rs +++ b/api/src/json_rpc.rs @@ -1,4 +1,4 @@ -// Copyright 2023 The MWC Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/api/src/lib.rs b/api/src/lib.rs index 278e88c98d..2eab46ac25 100644 --- a/api/src/lib.rs +++ b/api/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,9 +19,6 @@ use grin_pool as pool; use grin_util as util; -use failure; -#[macro_use] -extern crate failure_derive; #[macro_use] extern crate lazy_static; @@ -36,11 +33,11 @@ mod web; pub mod auth; pub mod client; mod foreign; -mod foreign_rpc; +pub mod foreign_rpc; mod handlers; pub mod json_rpc; mod owner; -mod owner_rpc; +pub mod owner_rpc; mod rest; mod router; mod stratum; diff --git a/api/src/owner.rs b/api/src/owner.rs index 54c7f4876a..e6ca9ee9e1 100644 --- a/api/src/owner.rs +++ b/api/src/owner.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,8 @@ //! 
Owner API External Definition use crate::chain::{Chain, SyncState}; -use crate::handlers::chain_api::{ChainCompactHandler, ChainValidationHandler}; +use crate::core::core::hash::Hash; +use crate::handlers::chain_api::{ChainCompactHandler, ChainResetHandler, ChainValidationHandler}; use crate::handlers::peers_api::{PeerHandler, PeersConnectedHandler}; use crate::handlers::server_api::StatusHandler; use crate::p2p::{self, PeerData}; @@ -97,6 +98,7 @@ impl Owner { } /// Trigger a validation of the chain state. + /// /// # Arguments /// * `assume_valid_rangeproofs_kernels` - if false, will validate rangeproofs, kernel signatures and sum of kernel excesses. if true, will only validate the sum of kernel excesses should equal the sum of unspent outputs minus total supply. /// @@ -128,6 +130,26 @@ impl Owner { chain_compact_handler.compact_chain() } + pub fn reset_chain_head(&self, hash: String) -> Result<(), Error> { + let hash = + Hash::from_hex(&hash).map_err(|_| Error::RequestError("invalid header hash".into()))?; + let handler = ChainResetHandler { + chain: self.chain.clone(), + sync_state: self.sync_state.clone(), + }; + handler.reset_chain_head(hash) + } + + pub fn invalidate_header(&self, hash: String) -> Result<(), Error> { + let hash = + Hash::from_hex(&hash).map_err(|_| Error::RequestError("invalid header hash".into()))?; + let handler = ChainResetHandler { + chain: self.chain.clone(), + sync_state: self.sync_state.clone(), + }; + handler.invalidate_header(hash) + } + /// Retrieves information about stored peers. /// If `None` is provided, will list all stored peers. /// diff --git a/api/src/owner_rpc.rs b/api/src/owner_rpc.rs index d04aa9fbe4..f2738e35aa 100644 --- a/api/src/owner_rpc.rs +++ b/api/src/owner_rpc.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ use crate::owner::Owner; use crate::p2p::PeerData; -use crate::rest::ErrorKind; +use crate::rest::Error; use crate::types::Status; use grin_p2p::types::PeerInfoDisplayLegacy; use std::net::SocketAddr; @@ -70,7 +70,7 @@ pub trait OwnerRpc: Sync + Send { # ); ``` */ - fn get_status(&self) -> Result; + fn get_status(&self) -> Result; /** Networked version of [Owner::validate_chain](struct.Owner.html#method.validate_chain). @@ -100,7 +100,7 @@ pub trait OwnerRpc: Sync + Send { # ); ``` */ - fn validate_chain(&self, assume_valid_rangeproofs_kernels: bool) -> Result<(), ErrorKind>; + fn validate_chain(&self, assume_valid_rangeproofs_kernels: bool) -> Result<(), Error>; /** Networked version of [Owner::compact_chain](struct.Owner.html#method.compact_chain). @@ -130,7 +130,11 @@ pub trait OwnerRpc: Sync + Send { # ); ``` */ - fn compact_chain(&self) -> Result<(), ErrorKind>; + fn compact_chain(&self) -> Result<(), Error>; + + fn reset_chain_head(&self, hash: String) -> Result<(), Error>; + + fn invalidate_header(&self, hash: String) -> Result<(), Error>; /** Networked version of [Owner::get_peers](struct.Owner.html#method.get_peers). @@ -172,7 +176,7 @@ pub trait OwnerRpc: Sync + Send { # ); ``` */ - fn get_peers(&self, peer_addr: Option) -> Result, ErrorKind>; + fn get_peers(&self, peer_addr: Option) -> Result, Error>; /** Networked version of [Owner::get_connected_peers](struct.Owner.html#method.get_connected_peers). 
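The reset_chain_head and invalidate_header owner methods above both parse the hash from hex and fail early with Error::RequestError("invalid header hash") on bad input. A hedged usage sketch, assuming the api crate re-exports Owner and Error (as grin's api crate does); both hash arguments are placeholders, not real header hashes:

use grin_api::{Error, Owner};

// Sketch only: `owner` is an api Owner already wired to a running node.
fn recover_from_bad_fork(
	owner: &Owner,
	bad_header_hex: &str, // header to deny-list going forward
	fork_point_hex: &str, // last known-good header to reset to
) -> Result<(), Error> {
	// Deny-list the bad header so peers serving it are rejected and banned.
	owner.invalidate_header(bad_header_hex.to_string())?;
	// Rewind both the body head and the header head to the fork point.
	owner.reset_chain_head(fork_point_hex.to_string())
}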
@@ -291,7 +295,7 @@ pub trait OwnerRpc: Sync + Send { # ); ``` */ - fn get_connected_peers(&self) -> Result, ErrorKind>; + fn get_connected_peers(&self) -> Result, Error>; /** Networked version of [Owner::ban_peer](struct.Owner.html#method.ban_peer). @@ -321,7 +325,7 @@ pub trait OwnerRpc: Sync + Send { # ); ``` */ - fn ban_peer(&self, peer_addr: SocketAddr) -> Result<(), ErrorKind>; + fn ban_peer(&self, peer_addr: SocketAddr) -> Result<(), Error>; /** Networked version of [Owner::unban_peer](struct.Owner.html#method.unban_peer). @@ -351,36 +355,44 @@ pub trait OwnerRpc: Sync + Send { # ); ``` */ - fn unban_peer(&self, peer_addr: SocketAddr) -> Result<(), ErrorKind>; + fn unban_peer(&self, peer_addr: SocketAddr) -> Result<(), Error>; } impl OwnerRpc for Owner { - fn get_status(&self) -> Result { - Owner::get_status(self).map_err(|e| e.kind().clone()) + fn get_status(&self) -> Result { + Owner::get_status(self) + } + + fn validate_chain(&self, assume_valid_rangeproofs_kernels: bool) -> Result<(), Error> { + Owner::validate_chain(self, assume_valid_rangeproofs_kernels) + } + + fn reset_chain_head(&self, hash: String) -> Result<(), Error> { + Owner::reset_chain_head(self, hash) } - fn validate_chain(&self, assume_valid_rangeproofs_kernels: bool) -> Result<(), ErrorKind> { - Owner::validate_chain(self, assume_valid_rangeproofs_kernels).map_err(|e| e.kind().clone()) + fn invalidate_header(&self, hash: String) -> Result<(), Error> { + Owner::invalidate_header(self, hash) } - fn compact_chain(&self) -> Result<(), ErrorKind> { - Owner::compact_chain(self).map_err(|e| e.kind().clone()) + fn compact_chain(&self) -> Result<(), Error> { + Owner::compact_chain(self) } - fn get_peers(&self, addr: Option) -> Result, ErrorKind> { - Owner::get_peers(self, addr).map_err(|e| e.kind().clone()) + fn get_peers(&self, addr: Option) -> Result, Error> { + Owner::get_peers(self, addr) } - fn get_connected_peers(&self) -> Result, ErrorKind> { - Owner::get_connected_peers(self).map_err(|e| e.kind().clone()) + fn get_connected_peers(&self) -> Result, Error> { + Owner::get_connected_peers(self) } - fn ban_peer(&self, addr: SocketAddr) -> Result<(), ErrorKind> { - Owner::ban_peer(self, addr).map_err(|e| e.kind().clone()) + fn ban_peer(&self, addr: SocketAddr) -> Result<(), Error> { + Owner::ban_peer(self, addr) } - fn unban_peer(&self, addr: SocketAddr) -> Result<(), ErrorKind> { - Owner::unban_peer(self, addr).map_err(|e| e.kind().clone()) + fn unban_peer(&self, addr: SocketAddr) -> Result<(), Error> { + Owner::unban_peer(self, addr) } } diff --git a/api/src/rest.rs b/api/src/rest.rs index 5d145e711c..654885dff8 100644 --- a/api/src/rest.rs +++ b/api/src/rest.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,109 +18,50 @@ //! To use it, just have your service(s) implement the ApiEndpoint trait and //! register them on a ApiServer. 
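The impl block above is where the old `.map_err(|e| e.kind().clone())` wrappers disappear: once the error type is a plain enum deriving Clone and Serialize, RPC traits can return it directly instead of projecting to an ErrorKind. A self-contained sketch of the pattern, assuming serde, serde_json and thiserror as dependencies; the names are illustrative, not the grin types:

use serde::Serialize;

// Minimal stand-ins for the API error and an RPC trait.
#[derive(Clone, Debug, thiserror::Error, Serialize)]
enum Error {
	#[error("API Internal error: {0}")]
	Internal(String),
	#[error("API Not found: {0}")]
	NotFound(String),
}

trait OwnerRpc {
	// Previously this would have been Result<String, ErrorKind>.
	fn get_status(&self) -> Result<String, Error>;
}

struct Node;

impl OwnerRpc for Node {
	fn get_status(&self) -> Result<String, Error> {
		// No .map_err(|e| e.kind().clone()): the error is returned as-is.
		Err(Error::NotFound("chain head".into()))
	}
}

fn main() {
	if let Err(e) = Node.get_status() {
		// Display comes from #[error(...)]; Serialize lets json-rpc ship it.
		println!("{} -> {}", e, serde_json::to_string(&e).unwrap());
	}
}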
-use crate::p2p::Error as P2pError; use crate::router::{Handler, HandlerObj, ResponseFuture, Router, RouterError}; use crate::web::response; -use failure::{Backtrace, Context, Fail}; use futures::channel::oneshot; use futures::TryStreamExt; use hyper::server::accept; use hyper::service::make_service_fn; use hyper::{Body, Request, Server, StatusCode}; -use rustls; use rustls::internal::pemfile; use rustls::{NoClientAuth, ServerConfig}; use std::convert::Infallible; -use std::fmt::{self, Display}; use std::fs::File; use std::net::SocketAddr; use std::sync::Arc; use std::{io, thread}; use tokio::net::TcpListener; use tokio::runtime::Runtime; +use tokio::stream::StreamExt; use tokio_rustls::TlsAcceptor; /// Errors that can be returned by an ApiEndpoint implementation. -#[derive(Debug)] -pub struct Error { - inner: Context, -} - -#[derive(Clone, Eq, PartialEq, Debug, Fail, Serialize, Deserialize)] -pub enum ErrorKind { - #[fail(display = "API Internal error: {}", _0)] +#[derive(Clone, Eq, PartialEq, Debug, thiserror::Error, Serialize, Deserialize)] +pub enum Error { + #[error("API Internal error: {0}")] Internal(String), - #[fail(display = "API Bad arguments: {}", _0)] + #[error("API Bad arguments: {0}")] Argument(String), - #[fail(display = "API Not found: {}", _0)] + #[error("API Not found: {0}")] NotFound(String), - #[fail(display = "API Request error: {}", _0)] + #[error("API Request error: {0}")] RequestError(String), - #[fail(display = "API ResponseError error: {}", _0)] + #[error("API ResponseError error: {0}")] ResponseError(String), - #[fail(display = "API Router error: {}", _0)] - Router(RouterError), - #[fail(display = "API P2P error: {}", _0)] + #[error("API Router error: {source:?}")] + Router { + #[from] + source: RouterError, + }, + #[error("API P2P error: {0}")] P2pError(String), } -impl Fail for Error { - fn cause(&self) -> Option<&dyn Fail> { - self.inner.cause() - } - - fn backtrace(&self) -> Option<&Backtrace> { - self.inner.backtrace() - } -} - -impl Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - Display::fmt(&self.inner, f) - } -} - -impl Error { - pub fn kind(&self) -> &ErrorKind { - self.inner.get_context() - } -} - -impl From for Error { - fn from(kind: ErrorKind) -> Error { - Error { - inner: Context::new(kind), - } - } -} - -impl From> for Error { - fn from(inner: Context) -> Error { - Error { inner: inner } - } -} - -impl From for Error { - fn from(error: RouterError) -> Error { - Error { - inner: Context::new(ErrorKind::Router(error)), - } - } -} - impl From for Error { fn from(error: crate::chain::Error) -> Error { - Error { - inner: Context::new(ErrorKind::Internal(error.to_string())), - } - } -} - -impl From for Error { - fn from(error: P2pError) -> Error { - Error { - inner: Context::new(ErrorKind::P2pError(format!("{}", error))), - } + Error::Internal(error.to_string()) } } @@ -141,7 +82,7 @@ impl TLSConfig { fn load_certs(&self) -> Result, Error> { let certfile = File::open(&self.certificate).map_err(|e| { - ErrorKind::Internal(format!( + Error::Internal(format!( "load_certs failed to open file {}, {}", self.certificate, e )) @@ -149,12 +90,12 @@ impl TLSConfig { let mut reader = io::BufReader::new(certfile); pemfile::certs(&mut reader) - .map_err(|_| ErrorKind::Internal("failed to load certificate".to_string()).into()) + .map_err(|_| Error::Internal("failed to load certificate".to_string())) } fn load_private_key(&self) -> Result { let keyfile = File::open(&self.private_key).map_err(|e| { - ErrorKind::Internal(format!( + 
Error::Internal(format!( "load_private_key failed to open file {}, {}", self.private_key, e )) @@ -162,12 +103,12 @@ impl TLSConfig { let mut reader = io::BufReader::new(keyfile); let keys = pemfile::pkcs8_private_keys(&mut reader) - .map_err(|_| ErrorKind::Internal("failed to load private key".to_string()))?; + .map_err(|_| Error::Internal("failed to load private key".to_string()))?; if keys.len() != 1 { - return Err(ErrorKind::Internal(format!( + return Err(Error::Internal(format!( "load_private_key expected a single private key, found {}", keys.len() - )))?; + ))); } Ok(keys[0].clone()) } @@ -177,7 +118,7 @@ impl TLSConfig { let key = self.load_private_key()?; let mut cfg = rustls::ServerConfig::new(rustls::NoClientAuth::new()); cfg.set_single_cert(certs, key) - .map_err(|e| ErrorKind::Internal(format!("set single certificate failed, {}", e)))?; + .map_err(|e| Error::Internal(format!("set single certificate failed, {}", e)))?; Ok(Arc::new(cfg)) } } @@ -219,10 +160,9 @@ impl ApiServer { api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>), ) -> Result, Error> { if self.shutdown_sender.is_some() { - return Err(ErrorKind::Internal( + return Err(Error::Internal( "Can't start HTTP API server, it's running already".to_string(), - ) - .into()); + )); } let rx = &mut api_chan.1; let tx = &mut api_chan.0; @@ -231,6 +171,7 @@ impl ApiServer { let m = oneshot::channel::<()>(); let tx = std::mem::replace(tx, m.0); self.shutdown_sender = Some(tx); + thread::Builder::new() .name("apis".to_string()) .spawn(move || { @@ -254,7 +195,7 @@ impl ApiServer { error!("HTTP API server error: {}", e) } }) - .map_err(|e| ErrorKind::Internal(format!("failed to spawn API thread. {}", e)).into()) + .map_err(|e| Error::Internal(format!("failed to spawn API thread. {}", e))) } /// Starts the TLS ApiServer at the provided address. @@ -267,18 +208,21 @@ impl ApiServer { api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>), ) -> Result, Error> { if self.shutdown_sender.is_some() { - return Err(ErrorKind::Internal( + return Err(Error::Internal( "Can't start HTTPS API server, it's running already".to_string(), - ) - .into()); + )); } + let rx = &mut api_chan.1; let tx = &mut api_chan.0; + // Jones's trick to update memory let m = oneshot::channel::<()>(); let tx = std::mem::replace(tx, m.0); self.shutdown_sender = Some(tx); + // Building certificates here because we want to handle certificates failures with panic. + // It is a fatal error on node start, not a regular error to log let certs = conf.load_certs()?; let keys = conf.load_private_key()?; @@ -286,6 +230,7 @@ impl ApiServer { config .set_single_cert(certs, keys) .expect("invalid key or certificate"); + let acceptor = TlsAcceptor::from(Arc::new(config)); thread::Builder::new() @@ -293,7 +238,10 @@ impl ApiServer { .spawn(move || { let server = async move { let mut listener = TcpListener::bind(&addr).await.expect("failed to bind"); - let listener = listener.incoming().and_then(move |s| acceptor.accept(s)); + let listener = listener + .incoming() + .and_then(move |s| acceptor.accept(s)) + .filter(|r| r.is_ok()); let server = Server::builder(accept::from_stream(listener)) .serve(make_service_fn(move |_| { @@ -314,7 +262,7 @@ impl ApiServer { error!("HTTP API server error: {}", e) } }) - .map_err(|e| ErrorKind::Internal(format!("failed to spawn API thread. {}", e)).into()) + .map_err(|e| Error::Internal(format!("failed to spawn API thread. 
{}", e))) } /// Stops the API server, it panics in case of error diff --git a/api/src/router.rs b/api/src/router.rs index 68c45d2bbc..8c4990d432 100644 --- a/api/src/router.rs +++ b/api/src/router.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,7 +13,6 @@ // limitations under the License. use futures::future::{self, Future}; -use hyper; use hyper::service::Service; use hyper::{Body, Method, Request, Response, StatusCode}; use std::collections::hash_map::DefaultHasher; @@ -87,13 +86,13 @@ pub trait Handler { } } -#[derive(Clone, Fail, Eq, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, thiserror::Error, Eq, Debug, PartialEq, Serialize, Deserialize)] pub enum RouterError { - #[fail(display = "Route {} already exists", _0)] + #[error("Route {0} already exists")] RouteAlreadyExists(String), - #[fail(display = "Route {} not found", _0)] + #[error("Route {0} not found")] RouteNotFound(String), - #[fail(display = "Route value not found for {}", _0)] + #[error("Route value not found for {0}")] NoValue(String), } diff --git a/api/src/stratum_rpc.rs b/api/src/stratum_rpc.rs index 5dc93ab285..74ada373c6 100644 --- a/api/src/stratum_rpc.rs +++ b/api/src/stratum_rpc.rs @@ -70,7 +70,7 @@ pub trait StratumRpc: Sync + Send { fn get_ip_list( &self, banned: Option, - ) -> Result, ErrorKind>; + ) -> Result, Error>; /** Clean IP data. As a result, if it is banned, it will become active. @@ -94,7 +94,7 @@ pub trait StratumRpc: Sync + Send { } */ - fn clean_ip(&self, ip: String) -> Result<(), ErrorKind>; + fn clean_ip(&self, ip: String) -> Result<(), Error>; /* Get single IP info stratum IP pool @@ -128,28 +128,22 @@ pub trait StratumRpc: Sync + Send { } } */ - fn get_ip_info( - &self, - ip: String, - ) -> Result; + fn get_ip_info(&self, ip: String) -> Result; } impl StratumRpc for Stratum { fn get_ip_list( &self, banned: Option, - ) -> Result, ErrorKind> { - Stratum::get_ip_list(self, banned).map_err(|e: Error| e.kind().clone()) + ) -> Result, Error> { + Stratum::get_ip_list(self, banned) } - fn clean_ip(&self, ip: String) -> Result<(), ErrorKind> { - Stratum::clean_ip(self, &ip).map_err(|e| e.kind().clone()) + fn clean_ip(&self, ip: String) -> Result<(), Error> { + Stratum::clean_ip(self, &ip) } - fn get_ip_info( - &self, - ip: String, - ) -> Result { - Stratum::get_ip_info(self, &ip).map_err(|e| e.kind().clone()) + fn get_ip_info(&self, ip: String) -> Result { + Stratum::get_ip_info(self, &ip) } } diff --git a/api/src/types.rs b/api/src/types.rs index c37214906f..4886e6e188 100644 --- a/api/src/types.rs +++ b/api/src/types.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,7 +15,7 @@ use crate::chain; use crate::core::core::hash::Hashed; use crate::core::core::merkle_proof::MerkleProof; -use crate::core::core::{KernelFeatures, TxKernel}; +use crate::core::core::{FeeFields, KernelFeatures, TxKernel}; use crate::core::{core, ser}; use crate::p2p; use crate::util::secp::pedersen; @@ -528,6 +528,7 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable { #[derive(Debug, Serialize, Deserialize, Clone)] pub struct TxKernelPrintable { pub features: String, + pub fee_shift: u8, pub fee: u64, pub lock_height: u64, pub excess: String, @@ -537,17 +538,21 @@ pub struct TxKernelPrintable { impl TxKernelPrintable { pub fn from_txkernel(k: &core::TxKernel) -> TxKernelPrintable { let features = k.features.as_string(); - let (fee, lock_height) = match k.features { + let (fee_fields, lock_height) = match k.features { KernelFeatures::Plain { fee } => (fee, 0), - KernelFeatures::Coinbase => (0, 0), + KernelFeatures::Coinbase => (FeeFields::zero(), 0), KernelFeatures::HeightLocked { fee, lock_height } => (fee, lock_height), KernelFeatures::NoRecentDuplicate { fee, relative_height, } => (fee, relative_height.into()), }; + // Printing for the most advanced version that we have. In prev versions the shift is 0, we should be good + let fee = fee_fields.fee(u64::MAX); + let fee_shift: u8 = fee_fields.fee_shift(u64::MAX); TxKernelPrintable { features, + fee_shift, fee, lock_height, excess: k.excess.to_hex(), @@ -558,7 +563,7 @@ impl TxKernelPrintable { // Just the information required for wallet reconstruction #[derive(Debug, Serialize, Deserialize, Clone)] -pub struct BlockHeaderInfo { +pub struct BlockHeaderDifficultyInfo { // Hash pub hash: String, /// Height of this block since the genesis block (height 0) @@ -567,9 +572,9 @@ pub struct BlockHeaderInfo { pub previous: String, } -impl BlockHeaderInfo { - pub fn from_header(header: &core::BlockHeader) -> BlockHeaderInfo { - BlockHeaderInfo { +impl BlockHeaderDifficultyInfo { + pub fn from_header(header: &core::BlockHeader) -> BlockHeaderDifficultyInfo { + BlockHeaderDifficultyInfo { hash: header.hash().to_hex(), height: header.height, previous: header.prev_hash.to_hex(), @@ -733,7 +738,7 @@ impl CompactBlockPrintable { #[derive(Debug, Serialize, Deserialize, Clone)] pub struct BlockOutputs { /// The block header - pub header: BlockHeaderInfo, + pub header: BlockHeaderDifficultyInfo, /// A printable version of the outputs pub outputs: Vec, } @@ -750,6 +755,15 @@ pub struct OutputListing { pub outputs: Vec, } +// For traversing a set of all available blocks +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct BlockListing { + /// The last height retrieved + pub last_retrieved_height: u64, + /// A printable version of the retrieved Blocks + pub blocks: Vec, +} + #[derive(Debug, Serialize, Deserialize, Clone)] pub struct LocatedTxKernel { pub tx_kernel: TxKernel, @@ -768,7 +782,7 @@ pub struct PoolInfo { /// libp2p peers are preferable, nodes wit tor addresses can be used to expand the network #[derive(Debug, Serialize, Deserialize, Clone)] pub struct Libp2pPeers { - /// Libp2p prres + /// Libp2p peers pub libp2p_peers: Vec, /// Other nodes. 
There is a high chance that they are running libp2p network pub node_peers: Vec, diff --git a/api/src/web.rs b/api/src/web.rs index 49fb148d8e..41006a47a0 100644 --- a/api/src/web.rs +++ b/api/src/web.rs @@ -5,7 +5,6 @@ use futures::future::ok; use hyper::body; use hyper::{Body, Request, Response, StatusCode}; use serde::{Deserialize, Serialize}; -use serde_json; use std::collections::HashMap; use std::fmt::Debug; use url::form_urlencoded; @@ -17,11 +16,10 @@ where { let raw = body::to_bytes(req.into_body()) .await - .map_err(|e| ErrorKind::RequestError(format!("Failed to read request: {}", e)))?; + .map_err(|e| Error::RequestError(format!("Failed to read request: {}", e)))?; - serde_json::from_reader(raw.bytes()).map_err(|e| { - ErrorKind::RequestError(format!("Invalid request body (expected json), {}", e)).into() - }) + serde_json::from_reader(raw.bytes()) + .map_err(|e| Error::RequestError(format!("Invalid request body (expected json), {}", e))) } /// Convert Result to ResponseFuture @@ -31,20 +29,15 @@ where { match res { Ok(s) => json_response_pretty(&s), - Err(e) => match e.kind() { - ErrorKind::Argument(msg) => response(StatusCode::BAD_REQUEST, msg.clone()), - ErrorKind::RequestError(msg) => response(StatusCode::BAD_REQUEST, msg.clone()), - ErrorKind::NotFound(msg) => response(StatusCode::NOT_FOUND, msg.clone()), - ErrorKind::Internal(msg) => response(StatusCode::INTERNAL_SERVER_ERROR, msg.clone()), - ErrorKind::ResponseError(msg) => { - response(StatusCode::INTERNAL_SERVER_ERROR, msg.clone()) - } + Err(e) => match e { + Error::Argument(msg) => response(StatusCode::BAD_REQUEST, msg.clone()), + Error::RequestError(msg) => response(StatusCode::BAD_REQUEST, msg.clone()), + Error::NotFound(msg) => response(StatusCode::NOT_FOUND, msg.clone()), + Error::Internal(msg) => response(StatusCode::INTERNAL_SERVER_ERROR, msg.clone()), + Error::ResponseError(msg) => response(StatusCode::INTERNAL_SERVER_ERROR, msg.clone()), // place holder - ErrorKind::Router(err) => response( - StatusCode::INTERNAL_SERVER_ERROR, - format!("Router Error, {}", err), - ), - ErrorKind::P2pError(err) => response( + Error::Router { .. } => response(StatusCode::INTERNAL_SERVER_ERROR, e.to_string()), + Error::P2pError(err) => response( StatusCode::INTERNAL_SERVER_ERROR, format!("P2P Error, {}", err), ), @@ -159,7 +152,7 @@ macro_rules! must_get_query( ($req: expr) =>( match $req.uri().query() { Some(q) => q, - None => return Err(ErrorKind::RequestError( format!("no query string at uri {}",$req.uri())))?, + None => return Err(Error::RequestError( format!("no query string at uri {}",$req.uri())))?, } )); @@ -170,7 +163,7 @@ macro_rules! parse_param( None => $default, Some(val) => match val.parse() { Ok(val) => val, - Err(_) => return Err(ErrorKind::RequestError(format!("invalid value of parameter {}", $name)).into()), + Err(_) => return Err(Error::RequestError(format!("invalid value of parameter {}", $name))), } } )); diff --git a/chain/Cargo.toml b/chain/Cargo.toml index 4cac85a422..8b27956980 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grin_chain" -version = "4.4.2" +version = "5.3.2" authors = ["Grin Developers "] description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format." 
license = "Apache-2.0" @@ -13,21 +13,20 @@ edition = "2018" bit-vec = "0.6" bitflags = "1" byteorder = "1" -failure = "0.1" -failure_derive = "0.1" -croaring = "1.0.1" +croaring = "1.1" enum_primitive = "0.1" log = "0.4" serde = "1" serde_derive = "1" +thiserror = "1" chrono = "0.4.11" lru-cache = "0.1" lazy_static = "1" -grin_core = { path = "../core", version = "4.4.2" } -grin_keychain = { path = "../keychain", version = "4.4.2" } -grin_store = { path = "../store", version = "4.4.2" } -grin_util = { path = "../util", version = "4.4.2" } +grin_core = { path = "../core", version = "5.3.2" } +grin_keychain = { path = "../keychain", version = "5.3.2" } +grin_store = { path = "../store", version = "5.3.2" } +grin_util = { path = "../util", version = "5.3.2" } [dev-dependencies] env_logger = "0.7" diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 05e653f0b9..ea6913b450 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,9 +15,7 @@ //! Facade and handler for the rest of the blockchain implementation //! and mostly the chain pipeline. -use crate::core::core::hash::{Hash, Hashed, ZERO_HASH}; use crate::core::core::merkle_proof::MerkleProof; -use crate::core::core::verifier_cache::VerifierCache; use crate::core::core::{ Block, BlockHeader, BlockSums, Committed, Inputs, KernelFeatures, Output, OutputIdentifier, Transaction, TxKernel, @@ -25,24 +23,31 @@ use crate::core::core::{ use crate::core::global; use crate::core::pow; use crate::core::ser::ProtocolVersion; -use crate::error::{Error, ErrorKind}; +use crate::error::Error; use crate::pipe; use crate::store; use crate::txhashset; -use crate::txhashset::{PMMRHandle, TxHashSet}; +use crate::txhashset::{Desegmenter, PMMRHandle, Segmenter, TxHashSet}; use crate::types::{ BlockStatus, ChainAdapter, CommitPos, NoStatus, Options, Tip, TxHashsetWriteStatus, }; use crate::util::secp::pedersen::{Commitment, RangeProof}; -use crate::{util::RwLock, ChainStore}; +use crate::util::RwLock; +use crate::ChainStore; +use crate::{ + core::core::hash::{Hash, Hashed}, + store::Batch, + txhashset::{ExtensionPair, HeaderExtension}, +}; +use grin_core::ser; use grin_store::Error::NotFoundErr; use grin_util::ToHex; -use std::collections::HashMap; use std::fs::{self, File}; use std::path::{Path, PathBuf}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::{Duration, Instant}; +use std::{collections::HashMap, io::Cursor}; /// Orphan pool size is limited by MAX_ORPHAN_SIZE pub const MAX_ORPHAN_SIZE: usize = 200; @@ -153,12 +158,13 @@ pub struct Chain { orphans: Arc, txhashset: Arc>, header_pmmr: Arc>>, - sync_pmmr: Arc>>, - verifier_cache: Arc>, + pibd_segmenter: Arc>>, + pibd_desegmenter: Arc>>, // POW verification function pow_verifier: fn(&BlockHeader) -> Result<(), pow::Error>, + denylist: Arc>>, archive_mode: bool, - genesis: BlockHeader, + genesis: Block, } impl Chain { @@ -170,7 +176,6 @@ impl Chain { adapter: Arc, genesis: Block, pow_verifier: fn(&BlockHeader) -> Result<(), pow::Error>, - verifier_cache: Arc>, archive_mode: bool, ) -> Result { let store = Arc::new(store::ChainStore::new(&db_root)?); @@ -188,20 +193,8 @@ impl Chain { ProtocolVersion(1), None, )?; - let mut sync_pmmr = PMMRHandle::new( - Path::new(&db_root).join("header").join("sync_head"), - false, - ProtocolVersion(1), 
- None, - )?; - setup_head( - &genesis, - &store, - &mut header_pmmr, - &mut sync_pmmr, - &mut txhashset, - )?; + setup_head(&genesis, &store, &mut header_pmmr, &mut txhashset, false)?; // Initialize the output_pos index based on UTXO set // and NRD kernel_pos index based recent kernel history. @@ -219,25 +212,132 @@ orphans: Arc::new(OrphanBlockPool::new()), txhashset: Arc::new(RwLock::new(txhashset)), header_pmmr: Arc::new(RwLock::new(header_pmmr)), - sync_pmmr: Arc::new(RwLock::new(sync_pmmr)), + pibd_segmenter: Arc::new(RwLock::new(None)), + pibd_desegmenter: Arc::new(RwLock::new(None)), pow_verifier, - verifier_cache, + denylist: Arc::new(RwLock::new(vec![])), archive_mode, - genesis: genesis.header, + genesis: genesis, }; // If known bad block exists on "current chain" then rewind prior to this. // Suppress any errors here in case we cannot find chain.rewind_bad_block()?; - let header_head = chain.header_head()?; - chain.rebuild_sync_mmr(&header_head)?; - chain.log_heads()?; Ok(chain) } + /// Add provided header hash to our "denylist". + /// The header corresponding to any "denied" hash will be rejected + /// and the peer subsequently banned. + pub fn invalidate_header(&self, hash: Hash) -> Result<(), Error> { + self.denylist.write().push(hash); + Ok(()) + } + + /// Reset both head and header_head to the provided header. + /// Handles simple rewind and more complex fork scenarios. + /// Used by the reset_chain_head owner api endpoint. + /// Caller can choose not to rewind headers, which can be used + /// during PIBD scenarios where it's desirable to restart the PIBD process + /// without re-downloading the header chain. + pub fn reset_chain_head<T: Into<Tip>>( + &self, + head: T, + rewind_headers: bool, + ) -> Result<(), Error> { + let head = head.into(); + + let mut header_pmmr = self.header_pmmr.write(); + let mut txhashset = self.txhashset.write(); + let mut batch = self.store.batch()?; + + let header = batch.get_block_header(&head.hash())?; + + // Rewind and reapply blocks to reset the output/rangeproof/kernel MMR. + txhashset::extending( + &mut header_pmmr, + &mut txhashset, + &mut batch, + |ext, batch| { + self.rewind_and_apply_fork(&header, ext, batch)?; + batch.save_body_head(&head)?; + Ok(()) + }, + )?; + + if rewind_headers { + // If the rewind of full blocks was successful then we can rewind the header MMR. + // Rewind and reapply headers to reset the header MMR.
+ txhashset::header_extending(&mut header_pmmr, &mut batch, |ext, batch| { + self.rewind_and_apply_header_fork(&header, ext, batch)?; + batch.save_header_head(&head)?; + Ok(()) + })?; + } + + batch.commit()?; + + Ok(()) + } + + /// wipes the chain head down to genesis, without attempting to rewind + /// Used upon PIBD failure, where we want to keep the header chain but + /// restart the output PMMRs from scratch + pub fn reset_chain_head_to_genesis(&self) -> Result<(), Error> { + let mut header_pmmr = self.header_pmmr.write(); + let mut txhashset = self.txhashset.write(); + let batch = self.store.batch()?; + + // Change head back to genesis + { + let head = Tip::from_header(&self.genesis.header); + batch.save_body_head(&head)?; + batch.commit()?; + } + + // Reinit + setup_head( + &self.genesis, + &self.store, + &mut header_pmmr, + &mut txhashset, + true, + )?; + + Ok(()) + } + + /// Reset prune lists (when PIBD resets and rolls back the + /// entire chain, the prune list needs to be manually wiped + /// as it's currently not included as part of rewind) + pub fn reset_prune_lists(&self) -> Result<(), Error> { + let mut header_pmmr = self.header_pmmr.write(); + let mut txhashset = self.txhashset.write(); + let mut batch = self.store.batch()?; + + txhashset::extending(&mut header_pmmr, &mut txhashset, &mut batch, |ext, _| { + let extension = &mut ext.extension; + extension.reset_prune_lists(); + Ok(()) + })?; + Ok(()) + } + + /// Reset PIBD head + pub fn reset_pibd_head(&self) -> Result<(), Error> { + let batch = self.store.batch()?; + batch.save_pibd_head(&self.genesis().into())?; + Ok(()) + } + + /// Are we running with archive_mode enabled? + pub fn archive_mode(&self) -> bool { + self.archive_mode + } + /// Return our shared header MMR handle. pub fn header_pmmr(&self) -> Arc>> { self.header_pmmr.clone() @@ -248,6 +348,11 @@ impl Chain { self.txhashset.clone() } + /// return genesis header + pub fn genesis(&self) -> BlockHeader { + self.genesis.header.clone() + } + /// Shared store instance. pub fn store(&self) -> Arc { self.store.clone() @@ -258,7 +363,7 @@ impl Chain { let hash = Hash::from_hex(BLOCK_TO_BAN)?; if let Ok(header) = self.get_block_header(&hash) { - if self.is_on_current_chain(&header).is_ok() { + if self.is_on_current_chain(&header, self.head()?).is_ok() { debug!( "rewind_bad_block: found header: {} at {}", header.hash(), @@ -293,7 +398,7 @@ impl Chain { &mut txhashset, &mut batch, |ext, batch| { - pipe::rewind_and_apply_fork(&prev_header, ext, batch)?; + self.rewind_and_apply_fork(&prev_header, ext, batch)?; // Reset chain head. batch.save_body_head(&new_head)?; @@ -321,7 +426,7 @@ impl Chain { let old_header_head = batch.header_head()?; txhashset::header_extending(&mut header_pmmr, &mut batch, |ext, batch| { - pipe::rewind_and_apply_header_fork(&prev_header, ext, batch)?; + self.rewind_and_apply_header_fork(&prev_header, ext, batch)?; // Reset chain head. batch.save_header_head(&new_head)?; @@ -357,16 +462,6 @@ impl Chain { }; log_head("head", self.head()?); log_head("header_head", self.header_head()?); - log_head("sync_head", self.get_sync_head()?); - - // Needed for Node State tracking... 
- let sync_head = self.get_sync_head()?; - info!( - "init: sync_head: {} @ {} [{}]", - sync_head.total_difficulty.to_num(), - sync_head.height, - sync_head.last_block_h, - ); Ok(()) } @@ -406,7 +501,7 @@ impl Chain { let inputs: Vec<_> = txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| { let previous_header = batch.get_previous_header(&block.header)?; - pipe::rewind_and_apply_fork(&previous_header, ext, batch)?; + self.rewind_and_apply_fork(&previous_header, ext, batch)?; ext.extension .utxo_view(ext.header_extension) .validate_inputs(&block.inputs(), batch) @@ -429,7 +524,7 @@ impl Chain { // If head is updated then we are either "next" block or we just experienced a "reorg" to new head. // Otherwise this is a "fork" off the main chain. if let Some(head) = head { - if head.prev_block_h == prev_head.last_block_h { + if self.is_on_current_chain(prev_head, head).is_ok() { BlockStatus::Next { prev } } else { BlockStatus::Reorg { @@ -448,14 +543,15 @@ impl Chain { } /// Quick check for "known" duplicate block up to and including current chain head. - fn is_known(&self, header: &BlockHeader) -> Result<(), Error> { + /// Returns an error if this block is "known". + pub fn is_known(&self, header: &BlockHeader) -> Result<(), Error> { let head = self.head()?; if head.hash() == header.hash() { - return Err(ErrorKind::Unfit("duplicate block".into()).into()); + return Err(Error::Unfit("duplicate block".into())); } if header.total_difficulty() <= head.total_difficulty { if self.block_exists(header.hash())? { - return Err(ErrorKind::Unfit("duplicate block".into()).into()); + return Err(Error::Unfit("duplicate block".into())); } } Ok(()) @@ -491,21 +587,21 @@ impl Chain { }, ); - Err(ErrorKind::Orphan("".to_string()).into()) + Err(Error::Orphan(String::new())) } /// Attempt to add a new block to the chain. /// Returns true if it has been added to the longest chain /// or false if it has added to a fork (or orphan?). fn process_block_single(&self, b: Block, opts: Options) -> Result, Error> { - // Check if we already know about this block. - self.is_known(&b.header)?; - // Process the header first. // If invalid then fail early. // If valid then continue with block processing with header_head committed to db etc. self.process_block_header(&b.header, opts)?; + // Check if we already know about this full block. + self.is_known(&b.header)?; + // Check if this block is an orphan. // Only do this once we know the header PoW is valid. self.check_orphan(&b, opts)?; @@ -515,72 +611,33 @@ impl Chain { // This conversion also ensures a block received in "v2" has valid input features (prevents malleability). let b = self.convert_block_v2(b)?; - let (maybe_new_head, prev_head) = { + let (head, fork_point, prev_head) = { let mut header_pmmr = self.header_pmmr.write(); let mut txhashset = self.txhashset.write(); let batch = self.store.batch()?; let prev_head = batch.head()?; let mut ctx = self.new_ctx(opts, batch, &mut header_pmmr, &mut txhashset)?; - let maybe_new_head = pipe::process_block(&b, &mut ctx); + let (head, fork_point) = pipe::process_block(&b, &mut ctx)?; - // We have flushed txhashset extension changes to disk - // but not yet committed the batch. - // A node shutdown at this point can be catastrophic... - // We prevent this via the stop_lock (see above). 
- if maybe_new_head.is_ok() { - ctx.batch.commit()?; - } + ctx.batch.commit()?; // release the lock and let the batch go before post-processing - (maybe_new_head, prev_head) + (head, fork_point, prev_head) }; - match maybe_new_head { - Ok((head, fork_point)) => { - let prev = self.get_previous_header(&b.header)?; - let status = self.determine_status( - head, - Tip::from_header(&prev), - prev_head, - Tip::from_header(&fork_point), - ); + let prev = self.get_previous_header(&b.header)?; + let status = self.determine_status( + head, + Tip::from_header(&prev), + prev_head, + Tip::from_header(&fork_point), + ); - // notifying other parts of the system of the update - self.adapter.block_accepted(&b, status, opts); + // notifying other parts of the system of the update + self.adapter.block_accepted(&b, status, opts); - Ok(head) - } - Err(e) => match e.kind() { - ErrorKind::InvalidBlockProof(err) => { - debug!( - "Block {} at {}: block proof error: {:?}", - b.hash(), - b.header.height, - &err - ); - Err(e) - } - ErrorKind::Unfit(ref msg) => { - debug!( - "Block {} at {} is unfit at this time: {}", - b.hash(), - b.header.height, - msg - ); - Err(ErrorKind::Unfit(msg.clone()).into()) - } - _ => { - info!( - "Rejected block {} at {}: {:?}", - b.hash(), - b.header.height, - e - ); - Err(ErrorKind::Other(format!("Rejected block, {:?}", e)).into()) - } - }, - } + Ok(head) } /// Process a block header received during "header first" propagation. @@ -599,28 +656,23 @@ impl Chain { /// Attempt to add new headers to the header chain (or fork). /// This is only ever used during sync and is based on sync_head. /// We update header_head here if our total work increases. - pub fn sync_block_headers(&self, headers: &[BlockHeader], opts: Options) -> Result<(), Error> { - let mut sync_pmmr = self.sync_pmmr.write(); + /// Returns the new sync_head (may temporarily diverge from header_head when syncing a long fork). + pub fn sync_block_headers( + &self, + headers: &[BlockHeader], + sync_head: Tip, + opts: Options, + ) -> Result, Error> { let mut header_pmmr = self.header_pmmr.write(); let mut txhashset = self.txhashset.write(); + let batch = self.store.batch()?; - // Sync the chunk of block headers, updating sync_head as necessary. - { - let batch = self.store.batch()?; - let mut ctx = self.new_ctx(opts, batch, &mut sync_pmmr, &mut txhashset)?; - pipe::sync_block_headers(headers, &mut ctx)?; - ctx.batch.commit()?; - } - - // Now "process" the last block header, updating header_head to match sync_head. - if let Some(header) = headers.last() { - let batch = self.store.batch()?; - let mut ctx = self.new_ctx(opts, batch, &mut header_pmmr, &mut txhashset)?; - pipe::process_block_header(header, &mut ctx)?; - ctx.batch.commit()?; - } + // Sync the chunk of block headers, updating header_head if total work increases. + let mut ctx = self.new_ctx(opts, batch, &mut header_pmmr, &mut txhashset)?; + let sync_head = pipe::process_block_headers(headers, sync_head, &mut ctx)?; + ctx.batch.commit()?; - Ok(()) + Ok(sync_head) } /// Build a new block processing context. 
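The hunks above drop the old `match maybe_new_head { ... Err(e) => match e.kind() { ... } }` ceremony: block processing errors now propagate with `?`, and callers match variants of the chain `Error` enum directly (see the `chain/src/error.rs` diff below). A minimal, self-contained sketch of that `failure`-to-`thiserror` pattern, using an illustrative two-variant subset of the enum and the `thiserror = "1"` dependency added in the Cargo.toml hunk:

```rust
// Sketch only: a two-variant subset of the chain error enum, rebuilt with thiserror.
use thiserror::Error;

#[derive(Debug, Error)]
pub enum Error {
	#[error("Block is unfit: {0}")]
	Unfit(String),
	#[error("Orphan, {0}")]
	Orphan(String),
}

fn classify(e: &Error) -> &'static str {
	// Before: `match e.kind() { ErrorKind::Unfit(_) => ... }` on a wrapper struct.
	// After: the enum itself is the error type, so we match it directly.
	match e {
		Error::Unfit(_) => "unfit at this time",
		Error::Orphan(_) => "orphan",
	}
}

fn main() {
	let e = Error::Unfit("duplicate block".into());
	// Display comes from the #[error(...)] attribute, replacing #[fail(display = ...)].
	println!("{}: {}", classify(&e), e);
}
```

The same shape explains the mechanical edits throughout the rest of this diff: `ErrorKind::X(..).into()` becomes `Error::X(..)`, and `#[fail(display = "...")]` becomes `#[error("...")]`.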
@@ -631,10 +683,13 @@ impl Chain { header_pmmr: &'a mut txhashset::PMMRHandle, txhashset: &'a mut txhashset::TxHashSet, ) -> Result, Error> { + let denylist = self.denylist.read().clone(); Ok(pipe::BlockContext { opts, pow_verifier: self.pow_verifier, - verifier_cache: self.verifier_cache.clone(), + header_allowed: Box::new(move |header| { + pipe::validate_header_denylist(header, &denylist) + }), header_pmmr, txhashset, batch, @@ -717,11 +772,11 @@ impl Chain { } /// Retrieves an unspent output using its PMMR position - pub fn get_unspent_output_at(&self, pos: u64) -> Result { + pub fn get_unspent_output_at(&self, pos0: u64) -> Result { let header_pmmr = self.header_pmmr.read(); let txhashset = self.txhashset.read(); txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, _| { - utxo.get_unspent_output_at(pos) + utxo.get_unspent_output_at(pos0) }) } @@ -802,7 +857,7 @@ impl Chain { if tx.lock_height() <= height { Ok(()) } else { - Err(ErrorKind::TxLockHeight.into()) + Err(Error::TxLockHeight) } } @@ -834,9 +889,16 @@ impl Chain { // latest block header. Rewind the extension to the specified header to // ensure the view is consistent. txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| { - pipe::rewind_and_apply_fork(&header, ext, batch)?; - ext.extension - .validate(&self.genesis, fast_validation, &NoStatus, &header)?; + self.rewind_and_apply_fork(&header, ext, batch)?; + ext.extension.validate( + &self.genesis.header, + fast_validation, + &NoStatus, + None, + None, + &header, + None, + )?; Ok(()) }) } @@ -847,7 +909,7 @@ impl Chain { let prev_root = txhashset::header_extending_readonly(&mut header_pmmr, &self.store(), |ext, batch| { let prev_header = batch.get_previous_header(header)?; - pipe::rewind_and_apply_header_fork(&prev_header, ext, batch)?; + self.rewind_and_apply_header_fork(&prev_header, ext, batch)?; ext.root() })?; @@ -866,7 +928,7 @@ impl Chain { let (prev_root, roots, sizes) = txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| { let previous_header = batch.get_previous_header(&b.header)?; - pipe::rewind_and_apply_fork(&previous_header, ext, batch)?; + self.rewind_and_apply_fork(&previous_header, ext, batch)?; let extension = &mut ext.extension; let header_extension = &mut ext.header_extension; @@ -894,7 +956,7 @@ impl Chain { b.header.prev_root = prev_root; // Set the output, rangeproof and kernel MMR roots. - b.header.output_root = roots.output_root(&b.header); + b.header.output_root = roots.output_root; b.header.range_proof_root = roots.rproof_root; b.header.kernel_root = roots.kernel_root; @@ -911,7 +973,7 @@ impl Chain { let mut txhashset = self.txhashset.write(); let merkle_proof = txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| { - pipe::rewind_and_apply_fork(&header, ext, batch)?; + self.rewind_and_apply_fork(&header, ext, batch)?; ext.extension.merkle_proof(out_id, batch) })?; @@ -925,6 +987,35 @@ impl Chain { txhashset.merkle_proof(commit) } + /// Rewind and apply fork with the chain specific header validation (denylist) rules. + /// If we rewind and re-apply a "denied" block then validation will fail. 
+ fn rewind_and_apply_fork( + &self, + header: &BlockHeader, + ext: &mut ExtensionPair, + batch: &Batch, + ) -> Result<BlockHeader, Error> { + let denylist = self.denylist.read().clone(); + let (header, _) = pipe::rewind_and_apply_fork(header, ext, batch, &|header| { + pipe::validate_header_denylist(header, &denylist) + })?; + Ok(header) + } + + /// Rewind and apply fork with the chain specific header validation (denylist) rules. + /// If we rewind and re-apply a "denied" header then validation will fail. + fn rewind_and_apply_header_fork( + &self, + header: &BlockHeader, + ext: &mut HeaderExtension, + batch: &Batch, + ) -> Result<(), Error> { + let denylist = self.denylist.read().clone(); + pipe::rewind_and_apply_header_fork(header, ext, batch, &|header| { + pipe::validate_header_denylist(header, &denylist) + }) + } + /// Provides a reading view into the current txhashset state as well as /// the required indexes for a consumer to rewind to a consistent state /// at the provided block hash. @@ -938,8 +1029,9 @@ let mut header_pmmr = self.header_pmmr.write(); let mut txhashset = self.txhashset.write(); + txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| { - pipe::rewind_and_apply_fork(&header, ext, batch)?; + self.rewind_and_apply_fork(&header, ext, batch)?; ext.extension.snapshot(batch)?; // prepare the zip @@ -948,6 +1040,131 @@ }) } + /// The segmenter is responsible for generating PIBD segments. + /// We cache a segmenter instance based on the current archive period (new period every 12 hours). + /// This allows us to efficiently generate bitmap segments for the current archive period. + /// + /// It is a relatively expensive operation to initialize and cache a new segmenter instance + /// as this involves rewinding the txhashset by approx 720 blocks (12 hours). + /// + /// Caller is responsible for only doing this when required. + /// For example, the caller should verify a peer segment request is valid before calling this. + /// + pub fn segmenter(&self) -> Result<Segmenter, Error> { + // The archive header corresponds to the data we will segment. + let ref archive_header = self.txhashset_archive_header()?; + + // Use our cached segmenter if we have one and the associated header matches. + if let Some(x) = self.pibd_segmenter.read().as_ref() { + if x.header() == archive_header { + return Ok(x.clone()); + } + } + + // We have no cached segmenter or the cached segmenter is no longer useful. + // Initialize a new segmenter, cache it and return it. + let segmenter = self.init_segmenter(archive_header)?; + let mut cache = self.pibd_segmenter.write(); + *cache = Some(segmenter.clone()); + + return Ok(segmenter); + } + + /// This is an expensive rewind to recreate bitmap state but we only need to do this once. + /// Caller is responsible for "caching" the segmenter (per archive period) for reuse.
+ fn init_segmenter(&self, header: &BlockHeader) -> Result<Segmenter, Error> { + let now = Instant::now(); + debug!( + "init_segmenter: initializing new segmenter for {} at {}", + header.hash(), + header.height + ); + + let mut header_pmmr = self.header_pmmr.write(); + let mut txhashset = self.txhashset.write(); + + let local_output_mmr_size = txhashset.output_mmr_size(); + let local_kernel_mmr_size = txhashset.kernel_mmr_size(); + let local_rangeproof_mmr_size = txhashset.rangeproof_mmr_size(); + + if header.output_mmr_size > local_output_mmr_size + || header.kernel_mmr_size > local_kernel_mmr_size + || header.output_mmr_size > local_rangeproof_mmr_size + { + return Err(Error::ChainInSyncing(format!("Header expected mmr size: output:{} kernel:{}. Chain's mmr size: output:{} kernel:{} rangeproof:{}", + header.output_mmr_size, header.kernel_mmr_size, local_output_mmr_size, local_kernel_mmr_size, local_rangeproof_mmr_size))); + } + + let bitmap_snapshot = + txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| { + ext.extension.rewind(header, batch)?; + Ok(ext.extension.bitmap_accumulator()) + })?; + + debug!("init_segmenter: done, took {}ms", now.elapsed().as_millis()); + + Ok(Segmenter::new( + self.txhashset(), + Arc::new(bitmap_snapshot), + header.clone(), + )) + } + + /// Instantiate a desegmenter for this header. Expects that the handshake is done and, + /// as a result, the header's bitmap_root_hash is known + pub fn create_desegmenter( + &self, + archive_header: &BlockHeader, + bitmap_root_hash: Hash, + ) -> Result<(), Error> { + let desegmenter = self.init_desegmenter(archive_header, bitmap_root_hash)?; + *self.pibd_desegmenter.write() = Some(desegmenter); + Ok(()) + } + + /// Instantiate the desegmenter (in the same lazy fashion as the segmenter, though this + /// should not be as expensive an operation) + pub fn get_desegmenter( + &self, + archive_header: &BlockHeader, + ) -> Arc<RwLock<Option<Desegmenter>>> { + // Use our cached desegmenter if we have one and the associated header matches. + if let Some(d) = self.pibd_desegmenter.write().as_ref() { + if d.header() == archive_header { + return self.pibd_desegmenter.clone(); + } + } + return Arc::new(RwLock::new(None)); + } + + /// Reset the desegmenter associated with this session + pub fn reset_desegmenter(&self) { + *self.pibd_desegmenter.write() = None + } + + /// Initialize a desegmenter, which is capable of extending the txhashset by appending + /// PIBD segments of the three PMMR trees + Bitmap PMMR. + /// The header should be the same header as selected for the txhashset.zip archive + fn init_desegmenter( + &self, + header: &BlockHeader, + bitmap_root_hash: Hash, + ) -> Result<Desegmenter, Error> { + debug!( + "init_desegmenter: initializing new desegmenter for {} at {}", + header.hash(), + header.height + ); + + Ok(Desegmenter::new( + self.txhashset(), + self.header_pmmr.clone(), + header.clone(), + bitmap_root_hash, + self.genesis.header.clone(), + self.store.clone(), + )) + } + /// To support the ability to download the txhashset from multiple peers in parallel, /// the peers must all agree on the exact binary representation of the txhashset. /// This means compacting and rewinding to the exact same header.
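segmenter() above is a read-then-write cache keyed on the current archive header: probe under the read lock, and only on a miss rebuild the segmenter and overwrite the slot under the write lock. A self-contained sketch of that caching pattern using only std types (HeaderKey and Handle are illustrative stand-ins, not the crate's BlockHeader/Segmenter API):

```rust
use std::sync::{Arc, RwLock};

// Stand-ins: a cache key (the archive header in the real code) and a
// cheaply clonable handle (the Segmenter in the real code).
#[derive(Clone, PartialEq, Debug)]
struct HeaderKey(u64);

#[derive(Clone, Debug)]
struct Handle {
	key: HeaderKey,
}

struct SegmenterCache {
	slot: Arc<RwLock<Option<Handle>>>,
}

impl SegmenterCache {
	fn get_or_init(&self, key: HeaderKey) -> Handle {
		// Fast path: reuse the cached handle if it matches the requested key.
		if let Some(h) = self.slot.read().unwrap().as_ref() {
			if h.key == key {
				return h.clone();
			}
		}
		// Slow path: rebuild (the expensive ~720-block rewind in the real code),
		// then cache the new handle and return a clone of it.
		let h = Handle { key };
		*self.slot.write().unwrap() = Some(h.clone());
		h
	}
}

fn main() {
	let cache = SegmenterCache {
		slot: Arc::new(RwLock::new(None)),
	};
	let a = cache.get_or_init(HeaderKey(42));
	let b = cache.get_or_init(HeaderKey(42)); // served from the cache
	assert!(a.key == b.key);
}
```

Note the read guard is dropped before the write lock is taken, so two concurrent callers can both miss and both rebuild; the method tolerates that (last write wins) rather than holding the write lock across the expensive rewind.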
@@ -969,6 +1186,17 @@ self.get_header_by_height(txhashset_height) } + /// Return the Block Header at the txhashset horizon, considering only the + /// contents of the header PMMR + pub fn txhashset_archive_header_header_only(&self) -> Result<BlockHeader, Error> { + let header_head = self.header_head()?; + let threshold = global::state_sync_threshold() as u64; + let archive_interval = global::txhashset_archive_interval(); + let mut txhashset_height = header_head.height.saturating_sub(threshold); + txhashset_height = txhashset_height.saturating_sub(txhashset_height % archive_interval); + self.get_header_by_height(txhashset_height) + } + // Special handling to make sure the whole kernel set matches each of its // roots in each block header, without truncation. We go back header by // header, rewind and check each root. This fixes a potential weakness in @@ -1001,115 +1229,31 @@ Ok(()) } - /// Rebuild the sync MMR based on current header_head. - /// We rebuild the sync MMR when first entering sync mode so ensure we - /// have an MMR we can safely rewind based on the headers received from a peer. - pub fn rebuild_sync_mmr(&self, head: &Tip) -> Result<(), Error> { - let mut sync_pmmr = self.sync_pmmr.write(); - let mut batch = self.store.batch()?; - let header = batch.get_block_header(&head.hash())?; - txhashset::header_extending(&mut sync_pmmr, &mut batch, |ext, batch| { - pipe::rewind_and_apply_header_fork(&header, ext, batch)?; - Ok(()) - })?; - batch.commit()?; - Ok(()) - } - - /// Check chain status whether a txhashset downloading is needed - pub fn check_txhashset_needed( - &self, - caller: String, - hashes: &mut Option<Vec<Hash>>, - ) -> Result<bool, Error> { - let horizon = global::cut_through_horizon() as u64; + /// Finds the "fork point" where header chain diverges from full block chain. + /// If we are syncing this will correspond to the last full block where + /// the next header is known but we do not yet have the full block. + /// i.e. This is the last known full block and all subsequent blocks are missing. + pub fn fork_point(&self) -> Result<BlockHeader, Error> { let body_head = self.head()?; - let header_head = self.header_head()?; - let sync_head = self.get_sync_head()?; - - debug!( - "{}: body_head - {}, {}, header_head - {}, {}, sync_head - {}, {}", - caller, - body_head.last_block_h, - body_head.height, - header_head.last_block_h, - header_head.height, - sync_head.last_block_h, - sync_head.height, - ); - - if body_head.total_difficulty >= header_head.total_difficulty { - debug!( - "{}: no need txhashset. header_head.total_difficulty: {} <= body_head.total_difficulty: {}", - caller, header_head.total_difficulty, body_head.total_difficulty, - ); - return Ok(false); + let mut current = self.get_block_header(&body_head.hash())?; + while !self.is_on_current_chain(&current, body_head).is_ok() { + current = self.get_previous_header(&current)?; + } + Ok(current) + } - let mut oldest_height = 0; - let mut oldest_hash = ZERO_HASH; - - // Start with body_head (head of the full block chain) - let mut current = self.get_block_header(&body_head.last_block_h); - if current.is_err() { - error!( - "{}: body_head not found in chain db: {} at {}", - caller, body_head.last_block_h, body_head.height, - ); + /// Compare fork point to our horizon. + /// If beyond the horizon then we cannot sync via recent full blocks + /// and we need a state (txhashset) sync.
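+ /// + /// A possible caller-side sketch (hypothetical, not part of this diff): + /// `let fork_point = chain.fork_point()?;` + /// `if chain.check_txhashset_needed(&fork_point)? { /* fall back to a state (txhashset/PIBD) sync */ }`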
+ pub fn check_txhashset_needed(&self, fork_point: &BlockHeader) -> Result { + if self.archive_mode() { + debug!("check_txhashset_needed: we are running with archive_mode=true, not needed"); return Ok(false); } - // - // TODO - Investigate finding the "common header" by comparing header_mmr and - // sync_mmr (bytes will be identical up to the common header). - // - // Traverse back through the full block chain from body head until we find a header - // that "is on current chain", which is the "fork point" between existing header chain - // and full block chain. - while let Ok(header) = current { - // break out of the while loop when we find a header common - // between the header chain and the current body chain - if self.is_on_current_chain(&header).is_ok() { - oldest_height = header.height; - oldest_hash = header.hash(); - break; - } - - current = self.get_previous_header(&header); - } - - // Traverse back through the header chain from header_head back to this fork point. - // These are the blocks that we need to request in body sync (we have the header but not the full block). - if let Some(hs) = hashes { - let mut h = self.get_block_header(&header_head.last_block_h); - while let Ok(header) = h { - if header.height <= oldest_height { - break; - } - hs.push(header.hash()); - h = self.get_previous_header(&header); - } - } - - if oldest_height < header_head.height.saturating_sub(horizon) { - if oldest_hash != ZERO_HASH { - // this is the normal case. for example: - // body head height is 1 (and not a fork), oldest_height will be 1 - // body head height is 0 (a typical fresh node), oldest_height will be 0 - // body head height is 10,001 (but at a fork with depth 1), oldest_height will be 10,000 - // body head height is 10,005 (but at a fork with depth 5), oldest_height will be 10,000 - debug!( - "{}: need a state sync for txhashset. oldest block which is not on local chain: {} at {}", - caller, oldest_hash, oldest_height, - ); - } else { - // this is the abnormal case, when is_on_current_chain() always return Err, and even for genesis block. - error!("{}: corrupted storage? state sync is needed", caller); - } - Ok(true) - } else { - Ok(false) - } + let header_head = self.header_head()?; + let horizon = global::cut_through_horizon() as u64; + Ok(fork_point.height < header_head.height.saturating_sub(horizon)) } /// Clean the temporary sandbox folder @@ -1158,13 +1302,13 @@ impl Chain { txhashset_data: File, status: &dyn TxHashsetWriteStatus, ) -> Result { - status.on_setup(); + status.on_setup(None, None, None, None); // Initial check whether this txhashset is needed or not - let mut hashes: Option> = None; - if !self.check_txhashset_needed("txhashset_write".to_owned(), &mut hashes)? { + let fork_point = self.fork_point()?; + if !self.check_txhashset_needed(&fork_point)? { warn!("txhashset_write: txhashset received but it's not needed! ignored."); - return Err(ErrorKind::InvalidTxHashSet("not needed".to_owned()).into()); + return Err(Error::InvalidTxHashSet("not needed".to_owned())); } let header = match self.get_block_header(&h) { @@ -1198,7 +1342,13 @@ impl Chain { let header_pmmr = self.header_pmmr.read(); let batch = self.store.batch()?; - txhashset.verify_kernel_pos_index(&self.genesis, &header_pmmr, &batch)?; + txhashset.verify_kernel_pos_index( + &self.genesis.header, + &header_pmmr, + &batch, + None, + None, + )?; } // all good, prepare a new batch and update all the required records @@ -1216,8 +1366,15 @@ impl Chain { // Validate the extension, generating the utxo_sum and kernel_sum. 
// Full validation, including rangeproofs and kernel signature verification. - let (utxo_sum, kernel_sum) = - extension.validate(&self.genesis, false, status, &header)?; + let (utxo_sum, kernel_sum) = extension.validate( + &self.genesis.header, + false, + status, + None, + None, + &header, + None, + )?; // Save the block_sums (utxo_sum, kernel_sum) to the db for use later. batch.save_block_sums( @@ -1291,21 +1448,31 @@ impl Chain { fn remove_historical_blocks( &self, header_pmmr: &txhashset::PMMRHandle, + archive_header: BlockHeader, batch: &store::Batch<'_>, ) -> Result<(), Error> { - if self.archive_mode { + if self.archive_mode() { return Ok(()); } - let horizon = global::cut_through_horizon() as u64; + let mut horizon = global::cut_through_horizon() as u64; + let head = batch.head()?; let tail = match batch.tail() { Ok(tail) => tail, - Err(_) => Tip::from_header(&self.genesis), + Err(_) => Tip::from_header(&self.genesis.header), }; - let cutoff = head.height.saturating_sub(horizon); + let mut cutoff = head.height.saturating_sub(horizon); + + // TODO: Check this, compaction selects a different horizon + // block from txhashset horizon/PIBD segmenter when using + // Automated testing chain + if archive_header.height < cutoff { + cutoff = archive_header.height; + horizon = head.height - archive_header.height; + } debug!( "remove_historical_blocks: head height: {}, tail height: {}, horizon: {}, cutoff: {}", @@ -1321,10 +1488,9 @@ impl Chain { let tail = batch.get_block_header(&tail_hash)?; // Remove old blocks (including short lived fork blocks) which height < tail.height - // here b is a block - for (_, b) in batch.blocks_iter()? { - if b.header.height < tail.height { - let _ = batch.delete_block(&b.hash()); + for block in batch.blocks_iter()? { + if block.header.height < tail.height { + let _ = batch.delete_block(&block.hash()); count += 1; } } @@ -1362,6 +1528,10 @@ impl Chain { } } + // Retrieve archive header here, so as not to attempt a read + // lock while removing historical blocks + let archive_header = self.txhashset_archive_header()?; + // Take a write lock on the txhashet and start a new writeable db batch. let header_pmmr = self.header_pmmr.read(); let mut txhashset = self.txhashset.write(); @@ -1380,13 +1550,14 @@ impl Chain { } // If we are not in archival mode remove historical blocks from the db. - if !self.archive_mode { - self.remove_historical_blocks(&header_pmmr, &batch)?; + if !self.archive_mode() { + self.remove_historical_blocks(&header_pmmr, archive_header, &batch)?; } // Make sure our output_pos index is consistent with the UTXO set. txhashset.init_output_pos_index(&header_pmmr, &batch)?; + // TODO - Why is this part of chain compaction? // Rebuild our NRD kernel_pos index based on recent kernel history. 
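+ // (NRD = "no recent duplicate" kernels: the NRD relative_height rule needs recent kernel positions, so this index is rebuilt from recent kernel history when compacting.)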
txhashset.init_recent_kernel_pos_index(&header_pmmr, &batch)?; @@ -1426,16 +1597,15 @@ impl Chain { let txhashset = self.txhashset.read(); let last_index = match max_pmmr_index { Some(i) => i, - None => txhashset.highest_output_insertion_index(), + None => txhashset.output_mmr_size(), }; let outputs = txhashset.outputs_by_pmmr_index(start_index, max_count, max_pmmr_index); let rangeproofs = txhashset.rangeproofs_by_pmmr_index(start_index, max_count, max_pmmr_index); if outputs.0 != rangeproofs.0 || outputs.1.len() != rangeproofs.1.len() { - return Err(ErrorKind::TxHashSetErr(String::from( + return Err(Error::TxHashSetErr(String::from( "Output and rangeproof sets don't match", - )) - .into()); + ))); } let mut output_vec: Vec = vec![]; for (ref x, &y) in outputs.1.iter().zip(rangeproofs.1.iter()) { @@ -1455,13 +1625,14 @@ impl Chain { None => self.head_header()?.height, }; // Return headers at the given heights - let prev_to_start_header = - self.get_header_by_height(start_block_height.saturating_sub(1))?; - let end_header = self.get_header_by_height(end_block_height)?; - Ok(( - prev_to_start_header.output_mmr_size + 1, - end_header.output_mmr_size, - )) + let start_mmr_size = if start_block_height == 0 { + 0 + } else { + self.get_header_by_height(start_block_height - 1)? + .output_mmr_size + 1 + }; + let end_mmr_size = self.get_header_by_height(end_block_height)?.output_mmr_size; + Ok((start_mmr_size, end_mmr_size)) } /// Orphans pool size @@ -1473,56 +1644,63 @@ impl Chain { pub fn head(&self) -> Result { self.store .head() - .map_err(|e| ErrorKind::StoreErr(e, "chain head".to_owned()).into()) + .map_err(|e| Error::StoreErr(e, "chain head".to_owned())) } /// Tail of the block chain in this node after compact (cross-block cut-through) pub fn tail(&self) -> Result { self.store .tail() - .map_err(|e| ErrorKind::StoreErr(e, "chain tail".to_owned()).into()) + .map_err(|e| Error::StoreErr(e, "chain tail".to_owned())) } /// Tip (head) of the header chain. pub fn header_head(&self) -> Result { self.store .header_head() - .map_err(|e| ErrorKind::StoreErr(e, "header head".to_owned()).into()) + .map_err(|e| Error::StoreErr(e, "header head".to_owned())) } /// Block header for the chain head pub fn head_header(&self) -> Result { self.store .head_header() - .map_err(|e| ErrorKind::StoreErr(e, "chain head header".to_owned()).into()) + .map_err(|e| Error::StoreErr(e, "chain head header".to_owned())) } /// Gets a block by hash pub fn get_block(&self, h: &Hash) -> Result { self.store .get_block(h) - .map_err(|e| ErrorKind::StoreErr(e, "chain get block".to_owned()).into()) + .map_err(|e| Error::StoreErr(e, "chain get block".to_owned())) + } + + /// Gets the earliest stored block (tail) + pub fn get_tail(&self) -> Result { + self.store + .tail() + .map_err(|e| Error::StoreErr(e, "chain get tail".to_owned())) } /// Gets a block header by hash pub fn get_block_header(&self, h: &Hash) -> Result { self.store .get_block_header(h) - .map_err(|e| ErrorKind::StoreErr(e, "chain get header".to_owned()).into()) + .map_err(|e| Error::StoreErr(e, "chain get header".to_owned())) } /// Get previous block header. pub fn get_previous_header(&self, header: &BlockHeader) -> Result { self.store .get_previous_header(header) - .map_err(|e| ErrorKind::StoreErr(e, "chain get previous header".to_owned()).into()) + .map_err(|e| Error::StoreErr(e, "chain get previous header".to_owned())) } /// Get block_sums by header hash. 
pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> { self.store .get_block_sums(h) - .map_err(|e| ErrorKind::StoreErr(e, "chain get block_sums".to_owned()).into()) + .map_err(|e| Error::StoreErr(e, "chain get block_sums".to_owned())) } /// Gets the block header at the provided height. @@ -1541,13 +1719,59 @@ /// Migrate our local db from v2 to v3. /// "commit only" inputs. fn migrate_db_v2_v3(store: &ChainStore) -> Result<(), Error> { - let store_v2 = store.with_version(ProtocolVersion(2)); - let batch = store_v2.batch()?; - for (_, block) in batch.blocks_iter()? { - batch.migrate_block(&block, ProtocolVersion(3))?; + if store.batch()?.is_blocks_v3_migrated()? { + // Previously migrated so skipping. + debug!("migrate_db_v2_v3: previously migrated, skipping"); + return Ok(()); } - batch.commit()?; - Ok(()) + let mut total = 0; + let mut keys_to_migrate = vec![]; + for (k, v) in store.batch()?.blocks_raw_iter()? { + total += 1; + + // We want to migrate all blocks that cannot be read via v3 protocol version. + let block_v3: Result<Block, _> = ser::deserialize( + &mut Cursor::new(&v), + ProtocolVersion(3), + ser::DeserializationMode::default(), + ); + if block_v3.is_err() { + let block_v2: Result<Block, _> = ser::deserialize( + &mut Cursor::new(&v), + ProtocolVersion(2), + ser::DeserializationMode::default(), + ); + if block_v2.is_ok() { + keys_to_migrate.push(k); + } + } + } + debug!( + "migrate_db_v2_v3: {} (of {}) blocks to migrate", + keys_to_migrate.len(), + total, + ); + let mut count = 0; + keys_to_migrate + .chunks(100) + .try_for_each(|keys| { + let batch = store.batch()?; + for key in keys { + batch.migrate_block(&key, ProtocolVersion(2), ProtocolVersion(3))?; + count += 1; + } + batch.commit()?; + debug!("migrate_db_v2_v3: successfully migrated {} blocks", count); + Ok(()) + }) + .and_then(|_| { + // Set flag to indicate we have migrated all blocks in the db. + // We will skip migration in the future. + let batch = store.batch()?; + batch.set_blocks_v3_migrated(true)?; + batch.commit()?; + Ok(()) + }) } /// Gets the block header in which a given output appears in the txhashset. @@ -1557,11 +1781,10 @@ let (_, pos) = match txhashset.get_unspent(commit)? { Some(o) => o, None => { - return Err(ErrorKind::OutputNotFound(format!( + return Err(Error::OutputNotFound(format!( "Not found commit {}", commit.to_hex() - )) - .into()) + ))) } }; let hash = header_pmmr.get_header_hash_by_height(pos.height)?; @@ -1661,31 +1884,35 @@ /// Verifies the given block header is actually on the current chain. /// Checks the header_by_height index to verify the header is where we say /// it is - pub fn is_on_current_chain(&self, header: &BlockHeader) -> Result<(), Error> { - let chain_header = self.get_header_by_height(header.height)?; - if chain_header.hash() == header.hash() { + fn is_on_current_chain<T: Into<Tip>>(&self, x: T, head: Tip) -> Result<(), Error> { + let x: Tip = x.into(); + if x.height > head.height { + return Err(Error::Other("not on current chain".to_string())); + } + + if x.hash() == self.get_header_hash_by_height(x.height)? { Ok(()) } else { - Err(ErrorKind::Other(format!("header is not on current chain")).into()) + Err(Error::Other(format!("header is not on current chain"))) } } - /// Get the tip of the current "sync" header chain. - /// This may be significantly different to current header chain.
- pub fn get_sync_head(&self) -> Result { - let hash = self.sync_pmmr.read().head_hash()?; - let header = self.store.get_block_header(&hash)?; - Ok(Tip::from_header(&header)) - } - /// Gets multiple headers at the provided heights. /// Note: Uses the sync pmmr, not the header pmmr. - pub fn get_locator_hashes(&self, heights: &[u64]) -> Result, Error> { - let pmmr = self.sync_pmmr.read(); - heights - .iter() - .map(|h| pmmr.get_header_hash_by_height(*h)) - .collect() + /// Note: This is based on the provided sync_head to support syncing against a fork. + pub fn get_locator_hashes(&self, sync_head: Tip, heights: &[u64]) -> Result, Error> { + let mut header_pmmr = self.header_pmmr.write(); + txhashset::header_extending_readonly(&mut header_pmmr, &self.store(), |ext, batch| { + let header = batch.get_block_header(&sync_head.hash())?; + self.rewind_and_apply_header_fork(&header, ext, batch)?; + + let hashes = heights + .iter() + .filter_map(|h| ext.get_header_hash_by_height(*h)) + .collect(); + + Ok(hashes) + }) } /// Builds an iterator on blocks starting from the current chain head and @@ -1701,7 +1928,7 @@ impl Chain { pub fn block_exists(&self, h: Hash) -> Result { self.store .block_exists(&h) - .map_err(|e| ErrorKind::StoreErr(e, "chain block exists".to_owned()).into()) + .map_err(|e| Error::StoreErr(e, "chain block exists".to_owned())) } } @@ -1709,8 +1936,8 @@ fn setup_head( genesis: &Block, store: &store::ChainStore, header_pmmr: &mut txhashset::PMMRHandle, - sync_pmmr: &mut txhashset::PMMRHandle, txhashset: &mut txhashset::TxHashSet, + resetting_pibd: bool, ) -> Result<(), Error> { let mut batch = store.batch()?; @@ -1720,17 +1947,11 @@ fn setup_head( batch.save_block_header(&genesis.header)?; } - if header_pmmr.last_pos == 0 { + if header_pmmr.size == 0 { txhashset::header_extending(header_pmmr, &mut batch, |ext, _| { ext.apply_header(&genesis.header) })?; } - - if sync_pmmr.last_pos == 0 { - txhashset::header_extending(sync_pmmr, &mut batch, |ext, _| { - ext.apply_header(&genesis.header) - })?; - } } // Make sure our header PMMR is consistent with header_head from db if it exists. @@ -1758,10 +1979,32 @@ fn setup_head( // Note: We are rewinding and validating against a writeable extension. // If validation is successful we will truncate the backend files // to match the provided block header. 
- let header = batch.get_block_header(&head.last_block_h)?; + let mut pibd_in_progress = false; + let header = { + let head = batch.get_block_header(&head.last_block_h)?; + let pibd_tip = store.pibd_head()?; + let pibd_head = batch.get_block_header(&pibd_tip.last_block_h)?; + if pibd_head.height > head.height && !resetting_pibd { + pibd_in_progress = true; + pibd_head + } else { + head + } + }; let res = txhashset::extending(header_pmmr, txhashset, &mut batch, |ext, batch| { - pipe::rewind_and_apply_fork(&header, ext, batch)?; + // If we're still downloading via PIBD, don't worry about sums and validations just yet + // We still want to rewind to the last completed block to ensure a consistent state + if pibd_in_progress { + debug!( + "init: PIBD appears to be in progress at height {}, hash {}, not validating, will attempt to continue", + header.height, + header.hash() + ); + return Ok(()); + } + + pipe::rewind_and_apply_fork(&header, ext, batch, &|_| Ok(()))?; let extension = &mut ext.extension; @@ -1809,7 +2052,7 @@ fn setup_head( let prev_header = batch.get_block_header(&head.prev_block_h)?; txhashset::extending(header_pmmr, txhashset, &mut batch, |ext, batch| { - pipe::rewind_and_apply_fork(&prev_header, ext, batch) + pipe::rewind_and_apply_fork(&prev_header, ext, batch, &|_| Ok(())) })?; // Now "undo" the latest block and forget it ever existed. @@ -1851,7 +2094,7 @@ fn setup_head( info!("init: saved genesis: {:?}", genesis.hash()); } - Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()).into()), + Err(e) => return Err(Error::StoreErr(e, "chain init load head".to_owned())), }; batch.commit()?; Ok(()) diff --git a/chain/src/error.rs b/chain/src/error.rs index fe0f9039e2..07d7b41d4e 100644 --- a/chain/src/error.rs +++ b/chain/src/error.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,276 +13,227 @@ // limitations under the License. //! 
Error types for chain +use crate::core::core; +use crate::core::core::pmmr::segment; use crate::core::core::{block, committed, transaction}; use crate::core::ser; use crate::keychain; use crate::util::secp; use crate::util::secp::pedersen::Commitment; -use failure::{Backtrace, Context, Fail}; +use grin_core::core::hash::Hash; use grin_store as store; -use std::fmt::{self, Display}; use std::io; -/// Error definition -#[derive(Debug, Fail)] -pub struct Error { - inner: Context, -} - /// Chain error definitions -#[derive(Clone, Eq, PartialEq, Debug, Fail)] -pub enum ErrorKind { +#[derive(Clone, Eq, PartialEq, Debug, thiserror::Error)] +pub enum Error { /// The block doesn't fit anywhere in our chain - #[fail(display = "Block is unfit: {}", _0)] + #[error("Block is unfit: {0}")] Unfit(String), /// Special case of orphan blocks - #[fail(display = "Orphan, {}", _0)] + #[error("Orphan, {0}")] Orphan(String), /// Difficulty is too low either compared to ours or the block PoW hash - #[fail(display = "Difficulty is too low compared to ours or the block PoW hash")] + #[error("Difficulty is too low compared to ours or the block PoW hash")] DifficultyTooLow, /// Addition of difficulties on all previous block is wrong - #[fail(display = "Addition of difficulties on all previous blocks is wrong")] + #[error("Addition of difficulties on all previous blocks is wrong")] WrongTotalDifficulty, /// Block header edge_bits is lower than our min - #[fail(display = "Cuckoo Size too small")] + #[error("Cuckoo Size too small")] LowEdgebits, /// Block header invalid hash, explicitly rejected - #[fail(display = "Block hash explicitly rejected by chain")] + #[error("Block hash explicitly rejected by chain")] InvalidHash, /// Scaling factor between primary and secondary PoW is invalid - #[fail(display = "Wrong scaling factor")] + #[error("Wrong scaling factor")] InvalidScaling, /// The proof of work is invalid - #[fail(display = "Invalid PoW")] + #[error("Invalid PoW")] InvalidPow, /// Peer abusively sending us an old block we already have - #[fail(display = "Old Block")] + #[error("Old Block")] OldBlock, - /// The block doesn't sum correctly or a tx signature is invalid - #[fail(display = "Invalid Block Proof, {}", _0)] - InvalidBlockProof(block::Error), /// Block time is too old - #[fail(display = "Invalid Block Time")] + #[error("Invalid Block Time")] InvalidBlockTime, /// Block height is invalid (not previous + 1) - #[fail(display = "Invalid Block Height")] + #[error("Invalid Block Height")] InvalidBlockHeight, /// One of the root hashes in the block is invalid - #[fail(display = "Invalid Root, {}", _0)] + #[error("Invalid Root, {0}")] InvalidRoot(String), /// One of the MMR sizes in the block header is invalid - #[fail(display = "Invalid MMR Size")] + #[error("Invalid MMR Size")] InvalidMMRSize, /// Error from underlying keychain impl - #[fail(display = "Keychain Error, {}", _0)] - Keychain(keychain::Error), + #[error("Keychain Error, {source:?}")] + Keychain { + #[from] + /// Conversion + source: keychain::Error, + }, /// Error from underlying secp lib - #[fail(display = "Secp Lib Error, {}", _0)] - Secp(secp::Error), + #[error("Secp Lib Error, {source:?}")] + Secp { + #[from] + /// Conversion + source: secp::Error, + }, /// One of the inputs in the block has already been spent - #[fail(display = "Already Spent: {:?}", _0)] + #[error("Already Spent: {0:?}")] AlreadySpent(Commitment), /// An output with that commitment already exists (should be unique) - #[fail(display = "Duplicate Commitment: {:?}", _0)] + 
#[error("Duplicate Commitment: {0:?}")] DuplicateCommitment(Commitment), /// Attempt to spend a coinbase output before it sufficiently matures. - #[fail(display = "Attempt to spend immature coinbase")] + #[error("Attempt to spend immature coinbase")] ImmatureCoinbase, /// Error validating a Merkle proof (coinbase output) - #[fail(display = "Error validating merkle proof, {}", _0)] + #[error("Error validating merkle proof, {0}")] MerkleProof(String), /// Output not found - #[fail(display = "Output not found, {}", _0)] + #[error("Output not found, {0}")] OutputNotFound(String), /// Rangeproof not found - #[fail(display = "Rangeproof not found, {}", _0)] + #[error("Rangeproof not found, {0}")] RangeproofNotFound(String), /// Tx kernel not found - #[fail(display = "Tx kernel not found")] + #[error("Tx kernel not found")] TxKernelNotFound, /// output spent - #[fail(display = "Output is spent")] + #[error("Output is spent")] OutputSpent, /// Invalid block version, either a mistake or outdated software - #[fail(display = "Invalid Block Version: {:?}", _0)] + #[error("Invalid Block Version: {0:?}")] InvalidBlockVersion(block::HeaderVersion), /// We've been provided a bad txhashset - #[fail(display = "Invalid TxHashSet: {}", _0)] + #[error("Invalid TxHashSet: {0}")] InvalidTxHashSet(String), /// Internal issue when trying to save or load data from store - #[fail(display = "Chain Store Error: {}, reason: {}", _1, _0)] + #[error("Chain Store Error: {1}, reason: {0}")] StoreErr(store::Error, String), /// Internal issue when trying to save or load data from append only files - #[fail(display = "Chain File Read Error: {}", _0)] + #[error("Chain File Read Error: {0}")] FileReadErr(String), /// Error serializing or deserializing a type - #[fail(display = "Chain Serialization Error, {}", _0)] - SerErr(ser::Error), + #[error("Chain Serialization Error, {source:?}")] + SerErr { + #[from] + /// Conversion + source: ser::Error, + }, /// Error with the txhashset - #[fail(display = "TxHashSetErr: {}", _0)] + #[error("TxHashSetErr: {0}")] TxHashSetErr(String), /// Tx not valid based on lock_height. - #[fail(display = "Invalid Transaction Lock Height")] + #[error("Invalid Transaction Lock Height")] TxLockHeight, /// Tx is not valid due to NRD relative_height restriction. - #[fail(display = "NRD Relative Height")] + #[error("NRD Relative Height")] NRDRelativeHeight, /// No chain exists and genesis block is required - #[fail(display = "Genesis Block Required")] + #[error("Genesis Block Required")] GenesisBlockRequired, /// Error from underlying tx handling - #[fail(display = "Transaction Validation Error: {:?}", _0)] - Transaction(transaction::Error), + #[error("Transaction Validation Error: {source:?}")] + Transaction { + /// Conversion + #[from] + source: transaction::Error, + }, /// Error from underlying block handling - #[fail(display = "Block Validation Error: {:?}", _0)] + #[error("Block Validation Error: {0:?}")] Block(block::Error), + /// Attempt to retrieve a header at a height greater than + /// the max allowed by u64 limits + #[error("Invalid Header Height: {0:?}")] + InvalidHeaderHeight(u64), /// Anything else - #[fail(display = "Chain other Error: {}", _0)] + #[error("Chain other Error: {0}")] Other(String), /// Error from summing and verifying kernel sums via committed trait. 
- #[fail( - display = "Committed Trait: Error summing and verifying kernel sums, {}", - _0 - )] - Committed(committed::Error), + #[error("Committed Trait: Error summing and verifying kernel sums, {source:?}")] + Committed { + #[from] + /// Conversion + source: committed::Error, + }, /// We cannot process data once the Grin server has been stopped. - #[fail(display = "Stopped (MWC Shutting Down)")] + #[error("Stopped (MWC Shutting Down)")] Stopped, /// Internal Roaring Bitmap error - #[fail(display = "Roaring Bitmap error")] + #[error("Roaring Bitmap error")] Bitmap, /// Error during chain sync - #[fail(display = "Sync error")] + #[error("Sync error")] SyncError(String), -} - -impl Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let cause = match self.cause() { - Some(c) => format!("{}", c), - None => String::from("Unknown"), - }; - let backtrace = match self.backtrace() { - Some(b) => format!("{}", b), - None => String::from("Unknown"), - }; - let output = format!( - "{} \n Cause: {} \n Backtrace: {}", - self.inner, cause, backtrace - ); - Display::fmt(&output, f) - } + /// PIBD segment related error + #[error("Segment error, {source}")] + SegmentError { + #[from] + /// Conversion + source: segment::SegmentError, + }, + /// We've decided to halt the PIBD process due to lack of supporting peers or + /// otherwise failing to progress for a certain amount of time + #[error("Aborting PIBD error")] + AbortingPIBDError, + /// The segmenter is associated to a different block header + #[error("Segmenter header mismatch, available {0} at height {1}")] + SegmenterHeaderMismatch(Hash, u64), + /// Segment height not within allowed range + #[error("Invalid segment height")] + InvalidSegmentHeight, + /// Error from the core calls + #[error("Core error, {source:?}")] + CoreErr { + /// Source error + #[from] + source: core::Error, + }, + /// Other issue with segment + #[error("Invalid segment: {0}")] + InvalidSegment(String), + /// The blockchain is in sync process, not all data is available + #[error("Chain is syncing, data is not complete")] + ChainInSyncing(String), + /// Invalid bitmap root hash. Probably old traffic or somebody attacking as + #[error("Invalid bitmap root hash")] + InvalidBitmapRoot, } impl Error { - /// get kind - pub fn kind(&self) -> ErrorKind { - self.inner.get_context().clone() - } - /// get cause - pub fn cause(&self) -> Option<&dyn Fail> { - self.inner.cause() - } - /// get backtrace - pub fn backtrace(&self) -> Option<&Backtrace> { - self.inner.backtrace() - } - /// Whether the error is due to a block that was intrinsically wrong pub fn is_bad_data(&self) -> bool { // shorter to match on all the "not the block's fault" errors - match self.kind() { - ErrorKind::Unfit(_) - | ErrorKind::Orphan(_) - | ErrorKind::StoreErr(_, _) - | ErrorKind::SerErr(_) - | ErrorKind::TxHashSetErr(_) - | ErrorKind::GenesisBlockRequired - | ErrorKind::Other(_) => false, + match self { + Error::Unfit(_) + | Error::Orphan(_) + | Error::StoreErr(_, _) + | Error::SerErr { .. 
} + | Error::TxHashSetErr(_) + | Error::GenesisBlockRequired + | Error::Other(_) => false, _ => true, } } } -impl From for Error { - fn from(kind: ErrorKind) -> Error { - Error { - inner: Context::new(kind), - } - } -} - -impl From> for Error { - fn from(inner: Context) -> Error { - Error { inner: inner } - } -} - -impl From for Error { - fn from(error: block::Error) -> Error { - let ec = error.clone(); - Error { - inner: error.context(ErrorKind::InvalidBlockProof(ec)), - } - } -} - impl From for Error { fn from(error: store::Error) -> Error { - let ec = error.clone(); - Error { - //inner: error.context();Context::new(ErrorKind::StoreErr(error.clone(), - // format!("{:?}", error))), - inner: error.context(ErrorKind::StoreErr(ec.clone(), format!("{:?}", ec))), - } - } -} - -impl From for Error { - fn from(error: keychain::Error) -> Error { - Error { - inner: Context::new(ErrorKind::Keychain(error)), - } - } -} - -impl From for Error { - fn from(error: transaction::Error) -> Error { - Error { - inner: Context::new(ErrorKind::Transaction(error)), - } - } -} - -impl From for Error { - fn from(error: committed::Error) -> Error { - Error { - inner: Context::new(ErrorKind::Committed(error)), - } + Error::StoreErr(error.clone(), format!("{:?}", error)) } } impl From for Error { fn from(e: io::Error) -> Error { - Error { - inner: Context::new(ErrorKind::TxHashSetErr(e.to_string())), - } - } -} - -impl From for Error { - fn from(error: ser::Error) -> Error { - Error { - inner: Context::new(ErrorKind::SerErr(error)), - } + Error::TxHashSetErr(e.to_string()) } } -impl From for Error { - fn from(e: secp::Error) -> Error { - Error { - inner: Context::new(ErrorKind::Secp(e)), - } +impl From for Error { + fn from(e: block::Error) -> Error { + Error::Block(e) } } diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 61a05fac78..125ee3a907 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -33,7 +33,6 @@ extern crate log; #[macro_use] extern crate lazy_static; -use failure; use grin_core as core; use grin_keychain as keychain; use grin_util as util; @@ -41,6 +40,7 @@ use grin_util as util; mod chain; mod error; pub mod linked_list; +pub mod pibd_params; pub mod pipe; pub mod store; pub mod txhashset; @@ -49,7 +49,7 @@ pub mod types; // Re-export the base interface pub use crate::chain::{Chain, BLOCK_TO_BAN, MAX_ORPHAN_SIZE}; -pub use crate::error::{Error, ErrorKind}; +pub use crate::error::Error; pub use crate::store::ChainStore; pub use crate::types::{ BlockStatus, ChainAdapter, Options, SyncState, SyncStatus, Tip, TxHashsetDownloadStats, diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index d36640681c..85478e1b09 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -88,7 +88,7 @@ pub trait ListIndex { /// Key is "prefix|commit". /// Note the key for an individual entry in the list is "prefix|commit|pos". 
fn get_list(&self, batch: &Batch<'_>, commit: Commitment) -> Result<Option<Self::List>, Error> { - batch.db.get_ser(&self.list_key(commit)) + batch.db.get_ser(&self.list_key(commit), None) } /// Returns one of "head", "tail" or "middle" entry variants. @@ -99,7 +99,7 @@ commit: Commitment, pos: u64, ) -> Result<Option<Self::Entry>, Error> { - batch.db.get_ser(&self.entry_key(commit, pos)) + batch.db.get_ser(&self.entry_key(commit, pos), None) } /// Peek the head of the list for the specified commitment. @@ -394,12 +394,12 @@ let mut list_count = 0; let mut entry_count = 0; let prefix = to_key(self.list_prefix, ""); - for (key, _) in batch.db.iter::<Vec<u8>>(&prefix)? { + for key in batch.db.iter(&prefix, |k, _| Ok(k.to_vec()))? { let _ = batch.delete(&key); list_count += 1; } let prefix = to_key(self.entry_prefix, ""); - for (key, _) in batch.db.iter::<Vec<u8>>(&prefix)? { + for key in batch.db.iter(&prefix, |k, _| Ok(k.to_vec()))? { let _ = batch.delete(&key); entry_count += 1; } diff --git a/chain/src/pibd_params.rs b/chain/src/pibd_params.rs new file mode 100644 index 0000000000..307c7c8238 --- /dev/null +++ b/chain/src/pibd_params.rs @@ -0,0 +1,51 @@ +// Copyright 2022 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Set of static definitions for all parameters related to PIBD and Desegmentation +//! Note these are for experimentation via compilation, not meant to be exposed as +//! configuration parameters anywhere + +/// Bitmap segment height assumed for requests and segment calculation +pub const BITMAP_SEGMENT_HEIGHT: u8 = 9; + +/// Output segment height assumed for requests and segment calculation +pub const OUTPUT_SEGMENT_HEIGHT: u8 = 11; + +/// Rangeproof segment height assumed for requests and segment calculation +pub const RANGEPROOF_SEGMENT_HEIGHT: u8 = 11; + +/// Kernel segment height assumed for requests and segment calculation +pub const KERNEL_SEGMENT_HEIGHT: u8 = 11; + +/// Maximum number of received segments to cache (across all trees) before we stop requesting others +pub const MAX_CACHED_SEGMENTS: usize = 15; + +/// How long the state sync should wait after requesting a segment from a peer before +/// deciding the segment isn't going to arrive. The syncer will then re-request the segment +pub const SEGMENT_REQUEST_TIMEOUT_SECS: i64 = 60; + +/// Number of simultaneous requests for segments we should make per available peer. Note this is currently +/// divisible by 3 to try and evenly spread requests among the 3 main MMRs (Bitmap segments +/// will always be requested first) +pub const SEGMENT_REQUEST_PER_PEER: usize = 3; +/// Maximum number of simultaneous requests. Please note, the data will be processed in a single thread, so +/// the throughput will not be high. 12 should load the CPU pretty well at the end of the sync process. +pub const SEGMENT_REQUEST_LIMIT: usize = 12; +
/// Maximum stale requests per peer.
If there are more requests, no new data will be requested +pub const STALE_REQUESTS_PER_PEER: u32 = 5; + +/// If the syncer hasn't seen a max work peer that supports PIBD in this number of seconds +/// give up and revert back to the txhashset.zip download method +pub const TXHASHSET_ZIP_FALLBACK_TIME_SECS: i64 = 60 + SEGMENT_REQUEST_TIMEOUT_SECS * 2; diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs index 666fed7647..2b6c017dd7 100644 --- a/chain/src/pipe.rs +++ b/chain/src/pipe.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,23 +15,20 @@ //! Implementation of the chain block acceptance (or refusal) pipeline. use crate::core::consensus; -use crate::core::core::hash::Hashed; -use crate::core::core::verifier_cache::VerifierCache; +use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::Committed; use crate::core::core::{ block, Block, BlockHeader, BlockSums, HeaderVersion, OutputIdentifier, TransactionBody, }; use crate::core::global; use crate::core::pow; -use crate::error::{Error, ErrorKind}; +use crate::error::Error; use crate::store; use crate::txhashset; use crate::types::{CommitPos, Options, Tip}; -use crate::util::RwLock; -use grin_core::core::hash::Hash; +use grin_util::RwLock; use std::collections::HashSet; use std::iter::FromIterator; -use std::sync::Arc; /// Contextual information required to process a new block and either reject or /// accept it. @@ -40,14 +37,14 @@ pub struct BlockContext<'a> { pub opts: Options, /// The pow verifier to use when processing a block. pub pow_verifier: fn(&BlockHeader) -> Result<(), pow::Error>, + /// Custom fn allowing arbitrary header validation rules (denylist) to be applied. + pub header_allowed: Box<dyn Fn(&BlockHeader) -> Result<(), Error>>, /// The active txhashset (rewindable MMRs) to use for block processing. pub txhashset: &'a mut txhashset::TxHashSet, /// The active header MMR handle. pub header_pmmr: &'a mut txhashset::PMMRHandle<BlockHeader>, /// The active batch to use for block processing. pub batch: store::Batch<'a>, - /// The verifier cache (caching verifier for rangeproofs and kernel signatures) - pub verifier_cache: Arc<RwLock<dyn VerifierCache>>, } lazy_static! { @@ -60,7 +57,7 @@ pub fn init_invalid_lock_hashes(hashed: &Option<Vec<String>>) -> Result<(), Erro let mut hashes = INVALID_BLOCK_HASHES.write(); for h in hs { hashes.insert(Hash::from_hex(h).map_err(|e| { - ErrorKind::Other(format!("Unable to harse hash hex string {}, {}", h, e)) + Error::Other(format!("Unable to parse hash hex string {}, {}", h, e)) })?); } } @@ -106,7 +103,7 @@ pub fn check_against_spent_output( "output contains spent commitment:{:?} from local branch", commit ); - return Err(ErrorKind::Other( + return Err(Error::Other( "output invalid, could be a replay attack".to_string(), ) .into()); @@ -121,7 +118,7 @@ pub fn check_against_spent_output( "output contains spent commitment:{:?} from the main chain", commit ); - return Err(ErrorKind::Other( + return Err(Error::Other( "output invalid, could be a replay attack".to_string(), ) .into()); @@ -140,7 +137,7 @@ fn validate_pow_only(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result let hash = header.hash(); if INVALID_BLOCK_HASHES.read().contains(&hash) { error!("Invalid header found: {}. 
Rejecting it!", hash); - return Err(ErrorKind::InvalidHash.into()); + return Err(Error::InvalidHash.into()); } if ctx.opts.contains(Options::SKIP_POW) { @@ -148,14 +145,14 @@ fn validate_pow_only(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result return Ok(()); } if !header.pow.is_primary() && !header.pow.is_secondary() { - return Err(ErrorKind::LowEdgebits.into()); + return Err(Error::LowEdgebits); } if (ctx.pow_verifier)(header).is_err() { error!( "pipe: error validating header with cuckoo edge_bits {}", header.pow.edge_bits(), ); - return Err(ErrorKind::InvalidPow.into()); + return Err(Error::InvalidPow); } Ok(()) } @@ -206,8 +203,10 @@ pub fn process_block( let header_pmmr = &mut ctx.header_pmmr; let txhashset = &mut ctx.txhashset; let batch = &mut ctx.batch; + let ctx_specific_validation = &ctx.header_allowed; let fork_point = txhashset::extending(header_pmmr, txhashset, batch, |ext, batch| { - let fork_point_local_blocks = rewind_and_apply_fork(&prev, ext, batch)?; + let fork_point_local_blocks = + rewind_and_apply_fork(&prev, ext, batch, ctx_specific_validation)?; let fork_point = fork_point_local_blocks.0; let local_branch_blocks = fork_point_local_blocks.1; @@ -285,43 +284,55 @@ pub fn replay_attack_check( Ok(()) } -/// Sync a chunk of block headers. +/// Process a batch of sequential block headers. /// This is only used during header sync. -pub fn sync_block_headers( +/// Will update header_head locally if this batch of headers increases total work. +/// Returns the updated sync_head, which may be on a fork. +pub fn process_block_headers( headers: &[BlockHeader], + sync_head: Tip, ctx: &mut BlockContext<'_>, -) -> Result<(), Error> { +) -> Result, Error> { if headers.is_empty() { - return Ok(()); + return Ok(None); } let last_header = headers.last().expect("last header"); - // Check if we know about all these headers. If so we can accept them quickly. - // If they *do not* increase total work on the sync chain we are done. - // If they *do* increase total work then we should process them to update sync_head. - let sync_head = { - let hash = ctx.header_pmmr.head_hash()?; - let header = ctx.batch.get_block_header(&hash)?; - Tip::from_header(&header) - }; - - if let Ok(existing) = ctx.batch.get_block_header(&last_header.hash()) { - if !has_more_work(&existing, &sync_head) { - return Ok(()); - } - } + let head = ctx.batch.header_head()?; // Validate each header in the chunk and add to our db. // Note: This batch may be rolled back later if the MMR does not validate successfully. + // Note: This batch may later be committed even if the MMR itself is rollbacked. for header in headers { validate_header(header, ctx)?; add_block_header(header, &ctx.batch)?; } - // Now apply this entire chunk of headers to the sync MMR (ctx is sync MMR specific). + let ctx_specific_validation = &ctx.header_allowed; + + // Now apply this entire chunk of headers to the header MMR. txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext, batch| { - rewind_and_apply_header_fork(&last_header, ext, batch)?; - Ok(()) + rewind_and_apply_header_fork(&last_header, ext, batch, ctx_specific_validation)?; + + // If previous sync_head is not on the "current" chain then + // these headers are on an alternative fork to sync_head. + let alt_fork = !ext.is_on_current_chain(sync_head, batch)?; + + // Update our "header_head" if this batch results in an increase in total work. + // Otherwise rollback this header extension. 
+ // Note the outer batch may still be committed to db assuming no errors occur in the extension. + if has_more_work(last_header, &head) { + let header_head = last_header.into(); + update_header_head(&header_head, &batch)?; + } else { + ext.force_rollback(); + }; + + if alt_fork || has_more_work(last_header, &sync_head) { + Ok(Some(last_header.into())) + } else { + Ok(None) + } }) } @@ -356,10 +367,12 @@ pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> // We want to validate this individual header before applying it to our header PMMR. validate_header(header, ctx)?; + let ctx_specific_validation = &ctx.header_allowed; + // Apply the header to the header PMMR, making sure we put the extension in the correct state // based on previous header first. txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext, batch| { - rewind_and_apply_header_fork(&prev_header, ext, batch)?; + rewind_and_apply_header_fork(&prev_header, ext, batch, ctx_specific_validation)?; ext.validate_root(header)?; ext.apply_header(header)?; if !has_more_work(&header, &header_head) { @@ -383,7 +396,7 @@ pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> fn check_known_head(header: &BlockHeader, head: &Tip) -> Result<(), Error> { let bh = header.hash(); if bh == head.last_block_h || bh == head.prev_block_h { - return Err(ErrorKind::Unfit("already known in head".to_string()).into()); + return Err(Error::Unfit("already known in head".to_string())); } Ok(()) } @@ -400,16 +413,16 @@ fn check_known_store( // TODO - we flag this as an "abusive peer" but only in the case // where we have the full block in our store. // So this is not a particularly exhaustive check. - Err(ErrorKind::OldBlock.into()) + Err(Error::OldBlock) } else { - Err(ErrorKind::Unfit("already known in store".to_string()).into()) + Err(Error::Unfit("already known in store".to_string())) } } Ok(false) => { // Not yet processed this block, we can proceed. Ok(()) } - Err(e) => Err(ErrorKind::StoreErr(e, "pipe get this block".to_owned()).into()), + Err(e) => Err(Error::StoreErr(e, "pipe get this block".to_owned())), } } @@ -428,33 +441,69 @@ fn check_bad_header(header: &BlockHeader) -> Result<(), Error> { "00020440a401086e57e1b7a92ebb0277c7f7fd47a38269ecc6789c2a80333725", )?]; if bad_hashes.contains(&header.hash()) { - Err(ErrorKind::InvalidBlockProof(block::Error::Other("explicit bad header".into())).into()) + Err(Error::Block(block::Error::Other( + "explicit bad header".into(), + ))) } else { Ok(()) } } +/// Apply any "header_invalidated" (aka denylist) rules provided as part of the context. +fn validate_header_ctx(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> { + // Apply any custom header validation rules via the context. + (ctx.header_allowed)(header) +} + +/// Validate header against an explicit "denylist" of header hashes. +/// Returns a "Block" error which is "bad_data" and will result in peer being banned. +pub fn validate_header_denylist(header: &BlockHeader, denylist: &[Hash]) -> Result<(), Error> { + if denylist.is_empty() { + return Ok(()); + } + + // Assume our denylist is a manageable size for now. + // Log it here to occasionally remind us. 
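// Sketch of how a BlockContext::header_allowed callback could be assembled from
// such a denylist, mirroring the boxed-closure shape introduced above. The hash
// and error types here are simplified placeholders, not the real chain types.
type ExampleHash = [u8; 32];
fn make_header_allowed(
    denylist: Vec<ExampleHash>,
) -> Box<dyn Fn(&ExampleHash) -> Result<(), String>> {
    Box::new(move |header_hash| {
        if denylist.contains(header_hash) {
            // Treated as "bad_data" upstream, so the sending peer gets banned.
            Err("header hash denied".to_string())
        } else {
            Ok(())
        }
    })
}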
+ debug!( + "validate_header_denylist: {} at {}, denylist: {:?}", + header.hash(), + header.height, + denylist + ); + + if denylist.contains(&header.hash()) { + return Err(Error::Block(block::Error::Other( + "header hash denied".into(), + ))); + } else { + return Ok(()); + } +} + /// First level of block validation that only needs to act on the block header /// to make it as cheap as possible. The different validations are also /// arranged by order of cost to have as little DoS surface as possible. fn validate_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> { + // Apply any ctx specific header validation (denylist) rules. + validate_header_ctx(header, ctx)?; + // First I/O cost, delayed as late as possible. let prev = prev_header_store(header, &mut ctx.batch)?; // This header height must increase the height from the previous header by exactly 1. if header.height != prev.height + 1 { - return Err(ErrorKind::InvalidBlockHeight.into()); + return Err(Error::InvalidBlockHeight); } // This header must have a valid header version for its height. if !consensus::valid_header_version(header.height, header.version) { - return Err(ErrorKind::InvalidBlockVersion(header.version).into()); + return Err(Error::InvalidBlockVersion(header.version)); } if header.timestamp <= prev.timestamp { // prevent time warp attacks and some timestamp manipulations by forcing strict // time progression - return Err(ErrorKind::InvalidBlockTime.into()); + return Err(Error::InvalidBlockTime); } // Check the header hash against a list of known bad headers. @@ -471,13 +520,13 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<( // Each block must contain at least 1 kernel and 1 output for the block reward. if num_outputs == 0 || num_kernels == 0 { - return Err(ErrorKind::InvalidMMRSize.into()); + return Err(Error::InvalidMMRSize); } // Block header is invalid (and block is invalid) if this lower bound is too heavy for a full block. 
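// Sketch of the lower-bound weight check performed just below. The per-element
// weights (1 per input, 21 per output, 3 per kernel) mirror grin's consensus
// constants at the time of writing; treat them as assumptions here.
fn example_weight_by_iok(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 {
    num_inputs + 21 * num_outputs + 3 * num_kernels
}
// With num_inputs = 0 this is the lightest possible body consistent with the
// header's claimed MMR sizes; if even that exceeds max_block_weight, the header
// cannot describe a valid block.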
- let weight = TransactionBody::weight_as_block(0, num_outputs, num_kernels); + let weight = TransactionBody::weight_by_iok(0, num_outputs, num_kernels); if weight > global::max_block_weight() { - return Err(ErrorKind::Block(block::Error::TooHeavy).into()); + return Err(Error::Block(block::Error::TooHeavy)); } // verify the proof of work and related parameters @@ -492,13 +541,13 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<( validate_pow_only(header, ctx)?; if header.total_difficulty() <= prev.total_difficulty() { - return Err(ErrorKind::DifficultyTooLow.into()); + return Err(Error::DifficultyTooLow); } let target_difficulty = header.total_difficulty() - prev.total_difficulty(); if header.pow.to_difficulty(header.height) < target_difficulty { - return Err(ErrorKind::DifficultyTooLow.into()); + return Err(Error::DifficultyTooLow); } // explicit check to ensure total_difficulty has increased by exactly @@ -513,7 +562,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<( target_difficulty.to_num(), next_header_info.difficulty.to_num() ); - return Err(ErrorKind::WrongTotalDifficulty.into()); + return Err(Error::WrongTotalDifficulty); } // check the secondary PoW scaling factor if applicable if header.pow.secondary_scaling != next_header_info.secondary_scaling { @@ -521,7 +570,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<( "validate_header: header secondary scaling {} != {}", header.pow.secondary_scaling, next_header_info.secondary_scaling ); - return Err(ErrorKind::InvalidScaling.into()); + return Err(Error::InvalidScaling); } } @@ -531,8 +580,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<( fn validate_block(block: &Block, ctx: &mut BlockContext<'_>) -> Result<(), Error> { let prev = ctx.batch.get_previous_header(&block.header)?; block - .validate(&prev.total_kernel_offset, ctx.verifier_cache.clone()) - .map_err(ErrorKind::InvalidBlockProof)?; + .validate(&prev.total_kernel_offset) + .map_err(|e| Error::Block(e))?; Ok(()) } @@ -604,7 +653,7 @@ fn update_body_tail(bh: &BlockHeader, batch: &store::Batch<'_>) -> Result<(), Er let tip = Tip::from_header(bh); batch .save_body_tail(&tip) - .map_err(|e| ErrorKind::StoreErr(e, "pipe save body tail".to_owned()))?; + .map_err(|e| Error::StoreErr(e, "pipe save body tail".to_owned()))?; debug!("body tail {} @ {}", bh.hash(), bh.height); Ok(()) } @@ -613,14 +662,14 @@ fn update_body_tail(bh: &BlockHeader, batch: &store::Batch<'_>) -> Result<(), Er fn add_block_header(bh: &BlockHeader, batch: &store::Batch<'_>) -> Result<(), Error> { batch .save_block_header(bh) - .map_err(|e| ErrorKind::StoreErr(e, "pipe save header".to_owned()))?; + .map_err(|e| Error::StoreErr(e, "pipe save header".to_owned()))?; Ok(()) } -fn update_header_head(head: &Tip, batch: &mut store::Batch<'_>) -> Result<(), Error> { +fn update_header_head(head: &Tip, batch: &store::Batch<'_>) -> Result<(), Error> { batch .save_header_head(&head) - .map_err(|e| ErrorKind::StoreErr(e, "pipe save header head".to_owned()))?; + .map_err(|e| Error::StoreErr(e, "pipe save header head".to_owned()))?; debug!( "header head updated to {} at {}", @@ -630,10 +679,10 @@ fn update_header_head(head: &Tip, batch: &mut store::Batch<'_>) -> Result<(), Er Ok(()) } -fn update_head(head: &Tip, batch: &mut store::Batch<'_>) -> Result<(), Error> { +fn update_head(head: &Tip, batch: &store::Batch<'_>) -> Result<(), Error> { batch .save_body_head(&head) - .map_err(|e| 
ErrorKind::StoreErr(e, "pipe save body".to_owned()))?; + .map_err(|e| Error::StoreErr(e, "pipe save body".to_owned()))?; debug!("head updated to {} at {}", head.last_block_h, head.height); @@ -650,10 +699,11 @@ pub fn rewind_and_apply_header_fork( header: &BlockHeader, ext: &mut txhashset::HeaderExtension<'_>, batch: &store::Batch<'_>, + ctx_specific_validation: &dyn Fn(&BlockHeader) -> Result<(), Error>, ) -> Result<(), Error> { let mut fork_hashes = vec![]; let mut current = header.clone(); - while current.height > 0 && ext.is_on_current_chain(¤t, batch).is_err() { + while current.height > 0 && !ext.is_on_current_chain(¤t, batch)? { fork_hashes.push(current.hash()); current = batch.get_previous_header(¤t)?; } @@ -668,7 +718,12 @@ pub fn rewind_and_apply_header_fork( for h in fork_hashes { let header = batch .get_block_header(&h) - .map_err(|e| ErrorKind::StoreErr(e, "getting forked headers".to_string()))?; + .map_err(|e| Error::StoreErr(e, "getting forked headers".to_string()))?; + + // Re-validate every header being re-applied. + // This makes it possible to check all header hashes against the ctx specific "denylist". + (ctx_specific_validation)(&header)?; + ext.validate_root(&header)?; ext.apply_header(&header)?; } @@ -685,20 +740,17 @@ pub fn rewind_and_apply_fork( header: &BlockHeader, ext: &mut txhashset::ExtensionPair<'_>, batch: &store::Batch<'_>, + ctx_specific_validation: &dyn Fn(&BlockHeader) -> Result<(), Error>, ) -> Result<(BlockHeader, Vec), Error> { let extension = &mut ext.extension; let header_extension = &mut ext.header_extension; // Prepare the header MMR. - rewind_and_apply_header_fork(header, header_extension, batch)?; + rewind_and_apply_header_fork(header, header_extension, batch, ctx_specific_validation)?; // Rewind the txhashset extension back to common ancestor based on header MMR. let mut current = batch.head_header()?; - while current.height > 0 - && header_extension - .is_on_current_chain(¤t, batch) - .is_err() - { + while current.height > 0 && !header_extension.is_on_current_chain(¤t, batch)? { current = batch.get_previous_header(¤t)?; } let fork_point = current; @@ -714,10 +766,10 @@ pub fn rewind_and_apply_fork( } fork_hashes.reverse(); - for h in fork_hashes.clone() { + for h in &fork_hashes { let fb = batch .get_block(&h) - .map_err(|e| ErrorKind::StoreErr(e, "getting forked blocks".to_string()))?; + .map_err(|e| Error::StoreErr(e, "getting forked blocks".to_string()))?; // Re-verify coinbase maturity along this fork. verify_coinbase_maturity(&fb, ext, batch)?; diff --git a/chain/src/store.rs b/chain/src/store.rs index c5bebf01b1..695c31d608 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,18 +14,19 @@ //! 
Implements storage primitives required by the chain -use crate::core::consensus::HeaderInfo; +use crate::core::consensus::HeaderDifficultyInfo; use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::{Block, BlockHeader, BlockSums, Inputs}; +use crate::core::global; use crate::core::pow::Difficulty; -use crate::core::ser::ProtocolVersion; +use crate::core::ser::{DeserializationMode, ProtocolVersion, Readable, Writeable}; use crate::linked_list::MultiIndex; use crate::types::{CommitPos, HashHeight, Tip}; use crate::util::secp::pedersen::Commitment; - -use croaring::{Bitmap, Portable}; +use croaring::Bitmap; +use grin_core::ser; use grin_store as store; -use grin_store::{option_to_not_found, to_key, Error, SerIterator}; +use grin_store::{option_to_not_found, to_key, Error}; use std::convert::TryInto; use std::sync::Arc; @@ -35,6 +36,7 @@ const BLOCK_HEADER_PREFIX: u8 = b'h'; const BLOCK_PREFIX: u8 = b'b'; const HEAD_PREFIX: u8 = b'H'; const TAIL_PREFIX: u8 = b'T'; +const PIBD_HEAD_PREFIX: u8 = b'I'; const HEADER_HEAD_PREFIX: u8 = b'G'; const OUTPUT_POS_PREFIX: u8 = b'p'; @@ -48,6 +50,11 @@ const BLOCK_SUMS_PREFIX: u8 = b'M'; const BLOCK_SPENT_PREFIX: u8 = b'S'; const BLOCK_SPENT_COMMITMENT_PREFIX: u8 = b'C'; +/// Prefix for various boolean flags stored in the db. +const BOOL_FLAG_PREFIX: u8 = b'B'; +/// Boolean flag for v3 migration. +const BLOCKS_V3_MIGRATED: &str = "blocks_v3_migrated"; + /// All chain-related database operations pub struct ChainStore { db: store::Store, @@ -60,32 +67,33 @@ impl ChainStore { Ok(ChainStore { db }) } - /// Create a new instance of the chain store based on this instance - /// but with the provided protocol version. This is used when migrating - /// data in the db to a different protocol version, reading using one version and - /// writing back to the db with a different version. - pub fn with_version(&self, version: ProtocolVersion) -> ChainStore { - let db_with_version = self.db.with_version(version); - ChainStore { - db: db_with_version, - } - } - /// The current chain head. pub fn head(&self) -> Result { - option_to_not_found(self.db.get_ser(&[HEAD_PREFIX]), || "HEAD".to_owned()) + option_to_not_found(self.db.get_ser(&[HEAD_PREFIX], None), || "HEAD".to_owned()) } /// The current header head (may differ from chain head). pub fn header_head(&self) -> Result { - option_to_not_found(self.db.get_ser(&[HEADER_HEAD_PREFIX]), || { + option_to_not_found(self.db.get_ser(&[HEADER_HEAD_PREFIX], None), || { "HEADER_HEAD".to_owned() }) } /// The current chain "tail" (earliest block in the store). pub fn tail(&self) -> Result { - option_to_not_found(self.db.get_ser(&[TAIL_PREFIX]), || "TAIL".to_owned()) + option_to_not_found(self.db.get_ser(&[TAIL_PREFIX], None), || "TAIL".to_owned()) + } + + /// The current PIBD head (will differ from the other heads. Return genesis block if PIBD head doesn't exist). + pub fn pibd_head(&self) -> Result { + let res = option_to_not_found(self.db.get_ser(&[PIBD_HEAD_PREFIX], None), || { + "PIBD_HEAD".to_owned() + }); + + match res { + Ok(r) => Ok(r), + Err(_) => Ok(Tip::from_header(&global::get_genesis_block().header)), + } } /// Header of the block at the head of the block chain (not the same thing as header_head). @@ -95,7 +103,7 @@ impl ChainStore { /// Get full block. 
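// All chain "heads" above live under single-byte keys (b'H', b'G', b'T', and
// now b'I' for the PIBD head). A toy sketch of that pattern with an in-memory
// map standing in for lmdb; the fallback mirrors pibd_head() returning a tip
// built from the genesis header when nothing has been stored yet.
use std::collections::HashMap;

fn load_head_or(db: &HashMap<Vec<u8>, Vec<u8>>, prefix: u8, genesis_tip: Vec<u8>) -> Vec<u8> {
    // The whole key is just the prefix byte; a missing PIBD head is not an error.
    db.get(&vec![prefix]).cloned().unwrap_or(genesis_tip)
}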
pub fn get_block(&self, h: &Hash) -> Result { - option_to_not_found(self.db.get_ser(&to_key(BLOCK_PREFIX, h)), || { + option_to_not_found(self.db.get_ser(&to_key(BLOCK_PREFIX, h), None), || { format!("BLOCK: {}", h) }) } @@ -107,7 +115,7 @@ impl ChainStore { /// Get block_sums for the block hash. pub fn get_block_sums(&self, h: &Hash) -> Result { - option_to_not_found(self.db.get_ser(&to_key(BLOCK_SUMS_PREFIX, h)), || { + option_to_not_found(self.db.get_ser(&to_key(BLOCK_SUMS_PREFIX, h), None), || { format!("Block sums for block: {}", h) }) } @@ -117,17 +125,38 @@ impl ChainStore { self.get_block_header(&header.prev_hash) } + /// Get previous header without deserializing the proof nonces + pub fn get_previous_header_skip_proof( + &self, + header: &BlockHeader, + ) -> Result { + self.get_block_header_skip_proof(&header.prev_hash) + } + /// Get block header. pub fn get_block_header(&self, h: &Hash) -> Result { - option_to_not_found(self.db.get_ser(&to_key(BLOCK_HEADER_PREFIX, h)), || { - format!("BLOCK HEADER: {}", h) - }) + option_to_not_found( + self.db.get_ser(&to_key(BLOCK_HEADER_PREFIX, h), None), + || format!("BLOCK HEADER: {}", h), + ) + } + + /// Get block header without deserializing the full PoW Proof; currently used + /// for difficulty iterator which is called many times but doesn't need the proof + pub fn get_block_header_skip_proof(&self, h: &Hash) -> Result { + option_to_not_found( + self.db.get_ser( + &to_key(BLOCK_HEADER_PREFIX, h), + Some(ser::DeserializationMode::SkipPow), + ), + || format!("BLOCK HEADER: {}", h), + ) } /// Get PMMR pos for the given output commitment. pub fn get_output_pos(&self, commit: &Commitment) -> Result { match self.get_output_pos_height(commit)? { - Some(pos) => Ok(pos.pos), + Some(pos) => Ok(pos.pos - 1), None => Err(Error::NotFoundErr(format!( "Output position for: {:?}", commit @@ -137,7 +166,7 @@ impl ChainStore { /// Get PMMR pos and block height for the given output commitment. pub fn get_output_pos_height(&self, commit: &Commitment) -> Result, Error> { - self.db.get_ser(&to_key(OUTPUT_POS_PREFIX, commit)) + self.db.get_ser(&to_key(OUTPUT_POS_PREFIX, commit), None) } /// Builds a new batch to be used with this store. @@ -158,17 +187,17 @@ pub struct Batch<'a> { impl<'a> Batch<'a> { /// The head. pub fn head(&self) -> Result { - option_to_not_found(self.db.get_ser(&[HEAD_PREFIX]), || "HEAD".to_owned()) + option_to_not_found(self.db.get_ser(&[HEAD_PREFIX], None), || "HEAD".to_owned()) } /// The tail. pub fn tail(&self) -> Result { - option_to_not_found(self.db.get_ser(&[TAIL_PREFIX]), || "TAIL".to_owned()) + option_to_not_found(self.db.get_ser(&[TAIL_PREFIX], None), || "TAIL".to_owned()) } /// The current header head (may differ from chain head). pub fn header_head(&self) -> Result { - option_to_not_found(self.db.get_ser(&[HEADER_HEAD_PREFIX]), || { + option_to_not_found(self.db.get_ser(&[HEADER_HEAD_PREFIX], None), || { "HEADER_HEAD".to_owned() }) } @@ -193,9 +222,14 @@ impl<'a> Batch<'a> { self.db.put_ser(&[HEADER_HEAD_PREFIX], t) } + /// Save PIBD head to db. 
+ pub fn save_pibd_head(&self, t: &Tip) -> Result<(), Error> { + self.db.put_ser(&[PIBD_HEAD_PREFIX], t) + } + /// get block pub fn get_block(&self, h: &Hash) -> Result { - option_to_not_found(self.db.get_ser(&to_key(BLOCK_PREFIX, h)), || { + option_to_not_found(self.db.get_ser(&to_key(BLOCK_PREFIX, h), None), || { format!("Block with hash: {}", h) }) } @@ -232,7 +266,7 @@ impl<'a> Batch<'a> { pub fn save_spent_commitments(&self, spent: &Commitment, hh: HashHeight) -> Result<(), Error> { let hash_list = self .db - .get_ser(&to_key(BLOCK_SPENT_COMMITMENT_PREFIX, spent))?; + .get_ser(&to_key(BLOCK_SPENT_COMMITMENT_PREFIX, spent), None)?; let mut spent_list; if let Some(list) = hash_list { spent_list = list; @@ -253,7 +287,7 @@ impl<'a> Batch<'a> { spent: &Commitment, ) -> Result>, Error> { self.db - .get_ser(&to_key(BLOCK_SPENT_COMMITMENT_PREFIX, spent)) + .get_ser(&to_key(BLOCK_SPENT_COMMITMENT_PREFIX, spent), None) } // /// An iterator to all "spent" commit in db @@ -262,11 +296,42 @@ impl<'a> Batch<'a> { // self.db.iter(&key) // } - /// Migrate a block stored in the db by serializing it using the provided protocol version. - /// Block may have been read using a previous protocol version but we do not actually care. - pub fn migrate_block(&self, b: &Block, version: ProtocolVersion) -> Result<(), Error> { - self.db - .put_ser_with_version(&to_key(BLOCK_PREFIX, b.hash())[..], b, version)?; + /// DB flag representing full migration of blocks to v3 version. + /// Default to false if flag not present. + pub fn is_blocks_v3_migrated(&self) -> Result { + let migrated: Option = self + .db + .get_ser(&to_key(BOOL_FLAG_PREFIX, BLOCKS_V3_MIGRATED), None)?; + match migrated { + None => Ok(false), + Some(x) => Ok(x.into()), + } + } + + /// Set DB flag representing full migration of blocks to v3 version. + pub fn set_blocks_v3_migrated(&self, migrated: bool) -> Result<(), Error> { + self.db.put_ser( + &to_key(BOOL_FLAG_PREFIX, BLOCKS_V3_MIGRATED)[..], + &BoolFlag(migrated), + )?; + Ok(()) + } + + /// Migrate a block stored in the db reading from one protocol version and writing + /// with new protocol version. + pub fn migrate_block( + &self, + key: &[u8], + from_version: ProtocolVersion, + to_version: ProtocolVersion, + ) -> Result<(), Error> { + let block: Option = self.db.get_with(key, move |_, mut v| { + ser::deserialize(&mut v, from_version, ser::DeserializationMode::default()) + .map_err(From::from) + })?; + if let Some(block) = block { + self.db.put_ser_with_version(key, &block, to_version)?; + } Ok(()) } @@ -274,6 +339,7 @@ impl<'a> Batch<'a> { pub fn delete(&self, key: &[u8]) -> Result<(), Error> { self.db.delete(key) } + /// Delete a full block. Does not delete any record associated with a block /// header. pub fn delete_block(&self, bh: &Hash) -> Result<(), Error> { @@ -292,6 +358,8 @@ impl<'a> Batch<'a> { } } + self.db.delete(&to_key(BLOCK_PREFIX, bh)[..])?; + // Best effort at deleting associated data for this block. // Not an error if these fail. { @@ -299,8 +367,6 @@ impl<'a> Batch<'a> { let _ = self.delete_spent_index(bh); } - self.db.delete(&to_key(BLOCK_PREFIX, bh)[..])?; - Ok(()) } @@ -362,15 +428,20 @@ impl<'a> Batch<'a> { } /// Iterator over the output_pos index. 
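// The migrate_block change above reads a block under one protocol version and
// rewrites it under another, leaving the key untouched. A hedged sketch of that
// read-old/write-new shape, with (de)serialization stubbed out as closures:
fn migrate_value<T>(
    read_with: impl Fn(u32) -> Option<T>, // deserialize stored bytes at `from` version
    write_with: impl Fn(&T, u32),         // re-serialize under `to` version, same key
    from_version: u32,
    to_version: u32,
) {
    if let Some(value) = read_with(from_version) {
        write_with(&value, to_version);
    }
}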
- pub fn output_pos_iter(&self) -> Result, Error> { + pub fn output_pos_iter(&self) -> Result, CommitPos)>, Error> { let key = to_key(OUTPUT_POS_PREFIX, ""); - self.db.iter(&key) + let protocol_version = self.db.protocol_version(); + self.db.iter(&key, move |k, mut v| { + ser::deserialize(&mut v, protocol_version, DeserializationMode::default()) + .map(|pos| (k.to_vec(), pos)) + .map_err(From::from) + }) } /// Get output_pos from index. pub fn get_output_pos(&self, commit: &Commitment) -> Result { match self.get_output_pos_height(commit)? { - Some(pos) => Ok(pos.pos), + Some(pos) => Ok(pos.pos - 1), None => Err(Error::NotFoundErr(format!( "Output position for: {:?}", commit @@ -380,7 +451,7 @@ impl<'a> Batch<'a> { /// Get output_pos and block height from index. pub fn get_output_pos_height(&self, commit: &Commitment) -> Result, Error> { - self.db.get_ser(&to_key(OUTPUT_POS_PREFIX, commit)) + self.db.get_ser(&to_key(OUTPUT_POS_PREFIX, commit), None) } /// Get the previous header. @@ -388,11 +459,33 @@ impl<'a> Batch<'a> { self.get_block_header(&header.prev_hash) } + /// Get the previous header, without deserializing the full PoW Proof (or the ability to derive the + /// block hash, this is used for the difficulty iterator). + pub fn get_previous_header_skip_proof( + &self, + header: &BlockHeader, + ) -> Result { + self.get_block_header_skip_proof(&header.prev_hash) + } + /// Get block header. pub fn get_block_header(&self, h: &Hash) -> Result { - option_to_not_found(self.db.get_ser(&to_key(BLOCK_HEADER_PREFIX, h)), || { - format!("BLOCK HEADER: {}", h) - }) + option_to_not_found( + self.db.get_ser(&to_key(BLOCK_HEADER_PREFIX, h), None), + || format!("BLOCK HEADER: {}", h), + ) + } + + /// Get block header without deserializing the full PoW Proof; currently used + /// for difficulty iterator which is called many times but doesn't need the proof + pub fn get_block_header_skip_proof(&self, h: &Hash) -> Result { + option_to_not_found( + self.db.get_ser( + &to_key(BLOCK_HEADER_PREFIX, h), + Some(ser::DeserializationMode::SkipPow), + ), + || format!("BLOCK HEADER: {}", h), + ) } /// Delete the block spent index. @@ -410,7 +503,7 @@ impl<'a> Batch<'a> { /// Get block_sums for the block. pub fn get_block_sums(&self, h: &Hash) -> Result { - option_to_not_found(self.db.get_ser(&to_key(BLOCK_SUMS_PREFIX, h)), || { + option_to_not_found(self.db.get_ser(&to_key(BLOCK_SUMS_PREFIX, h), None), || { format!("Block sums for block: {}", h) }) } @@ -436,10 +529,10 @@ impl<'a> Batch<'a> { fn get_legacy_input_bitmap(&self, bh: &Hash) -> Result { option_to_not_found( - self.db.get_with( - &to_key(BLOCK_INPUT_BITMAP_PREFIX, bh), - Bitmap::deserialize::, - ), + self.db + .get_with(&to_key(BLOCK_INPUT_BITMAP_PREFIX, bh), move |_, data| { + Ok(Bitmap::deserialize::(data)) + }), || "legacy block input bitmap".to_string(), ) } @@ -447,9 +540,10 @@ impl<'a> Batch<'a> { /// Get the "spent index" from the db for the specified block. /// If we need to rewind a block then we use this to "unspend" the spent outputs. pub fn get_spent_index(&self, bh: &Hash) -> Result, Error> { - option_to_not_found(self.db.get_ser(&to_key(BLOCK_SPENT_PREFIX, bh)), || { - format!("spent index: {}", bh) - }) + option_to_not_found( + self.db.get_ser(&to_key(BLOCK_SPENT_PREFIX, bh), None), + || format!("spent index: {}", bh), + ) } /// Commits this batch. 
If it's a child batch, it will be merged with the @@ -466,10 +560,27 @@ impl<'a> Batch<'a> { }) } - /// An iterator to all block in db - pub fn blocks_iter(&self) -> Result, Error> { + /// Iterator over all full blocks in the db. + /// Uses default db serialization strategy via db protocol version. + pub fn blocks_iter(&self) -> Result, Error> { let key = to_key(BLOCK_PREFIX, ""); - self.db.iter(&key) + let protocol_version = self.db.protocol_version(); + self.db.iter(&key, move |_, mut v| { + ser::deserialize(&mut v, protocol_version, DeserializationMode::default()) + .map_err(From::from) + }) + } + + /// Iterator over raw data for full blocks in the db. + /// Used during block migration (we need flexibility around deserialization). + pub fn blocks_raw_iter(&self) -> Result, Vec)>, Error> { + let key = to_key(BLOCK_PREFIX, ""); + self.db.iter(&key, |k, v| Ok((k.to_vec(), v.to_vec()))) + } + + /// Protocol version of our underlying db. + pub fn protocol_version(&self) -> ProtocolVersion { + self.db.protocol_version() } } @@ -488,6 +599,7 @@ pub struct DifficultyIter<'a> { // toward the genesis block (while maintaining current state) header: Option, prev_header: Option, + prev_header_hash: Option, } impl<'a> DifficultyIter<'a> { @@ -500,6 +612,7 @@ impl<'a> DifficultyIter<'a> { batch: None, header: None, prev_header: None, + prev_header_hash: None, } } @@ -512,39 +625,54 @@ impl<'a> DifficultyIter<'a> { batch: Some(batch), header: None, prev_header: None, + prev_header_hash: None, } } } impl<'a> Iterator for DifficultyIter<'a> { - type Item = HeaderInfo; + type Item = HeaderDifficultyInfo; fn next(&mut self) -> Option { // Get both header and previous_header if this is the initial iteration. // Otherwise move prev_header to header and get the next prev_header. - self.header = if self.header.is_none() { + // Note that due to optimizations being called in `get_block_header_skip_proof`, + // Items returned by this iterator cannot be expected to correctly + // calculate their own hash - This iterator is purely for iterating through + // difficulty information + let (cur_header, cur_header_hash) = if self.header.is_none() { if let Some(ref batch) = self.batch { - batch.get_block_header(&self.start).ok() + ( + batch.get_block_header_skip_proof(&self.start).ok(), + Some(self.start), + ) } else if let Some(ref store) = self.store { - store.get_block_header(&self.start).ok() + ( + store.get_block_header_skip_proof(&self.start).ok(), + Some(self.start), + ) } else { - None + (None, None) } } else { - self.prev_header.clone() + (self.prev_header.clone(), self.prev_header_hash) }; + self.header = cur_header; + // If we have a header we can do this iteration. // Otherwise we are done. 
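// Headers read with SkipPow cannot recompute their own hash (the header hash
// commits to the omitted proof bytes), so the iterator carries identity along
// explicitly: the hash reported for the next (older) header is simply the
// current header's prev_hash. A tiny sketch with simplified types:
struct SlimHeader {
    prev_hash: [u8; 32], // proof nonces deliberately not loaded
}
fn next_reported_hash(current: &SlimHeader) -> [u8; 32] {
    current.prev_hash // no hashing of the (absent) proof data is required
}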
if let Some(header) = self.header.clone() { if let Some(ref batch) = self.batch { - self.prev_header = batch.get_previous_header(&header).ok(); + self.prev_header = batch.get_previous_header_skip_proof(&header).ok(); } else if let Some(ref store) = self.store { - self.prev_header = store.get_previous_header(&header).ok(); + self.prev_header = store.get_previous_header_skip_proof(&header).ok(); } else { self.prev_header = None; } + self.prev_header_hash = Some(header.prev_hash); + let prev_difficulty = self .prev_header .clone() @@ -552,8 +680,8 @@ impl<'a> Iterator for DifficultyIter<'a> { let difficulty = header.total_difficulty() - prev_difficulty; let scaling = header.pow.secondary_scaling; - Some(HeaderInfo::new( - header.hash(), + Some(HeaderDifficultyInfo::new( + cur_header_hash, header.timestamp.timestamp() as u64, difficulty, scaling, @@ -571,3 +699,25 @@ impl<'a> Iterator for DifficultyIter<'a> { pub fn nrd_recent_kernel_index() -> MultiIndex { MultiIndex::init(NRD_KERNEL_LIST_PREFIX, NRD_KERNEL_ENTRY_PREFIX) } + +struct BoolFlag(bool); + +impl From for bool { + fn from(b: BoolFlag) -> Self { + b.0 + } +} + +impl Readable for BoolFlag { + fn read(reader: &mut R) -> Result { + let x = reader.read_u8()?; + Ok(BoolFlag(1 & x == 1)) + } +} + +impl Writeable for BoolFlag { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + writer.write_u8(self.0.into())?; + Ok(()) + } +} diff --git a/chain/src/txhashset.rs b/chain/src/txhashset.rs index 094df1cb96..0958ae9a20 100644 --- a/chain/src/txhashset.rs +++ b/chain/src/txhashset.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,11 +16,15 @@ //! kernel) more conveniently and transactionally. mod bitmap_accumulator; +mod desegmenter; mod rewindable_kernel_view; +mod segmenter; mod txhashset; mod utxo_view; pub use self::bitmap_accumulator::*; +pub use self::desegmenter::*; pub use self::rewindable_kernel_view::*; +pub use self::segmenter::*; pub use self::txhashset::*; pub use self::utxo_view::*; diff --git a/chain/src/txhashset/bitmap_accumulator.rs b/chain/src/txhashset/bitmap_accumulator.rs index bdc72b45b9..5c8b9c66f8 100644 --- a/chain/src/txhashset/bitmap_accumulator.rs +++ b/chain/src/txhashset/bitmap_accumulator.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use std::cmp::min; use std::convert::TryFrom; use std::time::Instant; @@ -19,9 +20,11 @@ use bit_vec::BitVec; use croaring::Bitmap; use crate::core::core::hash::{DefaultHashable, Hash}; -use crate::core::core::pmmr::{self, ReadonlyPMMR, VecBackend, PMMR}; +use crate::core::core::pmmr::segment::{Segment, SegmentIdentifier, SegmentProof}; +use crate::core::core::pmmr::{self, Backend, ReadablePMMR, ReadonlyPMMR, VecBackend, PMMR}; use crate::core::ser::{self, PMMRable, Readable, Reader, Writeable, Writer}; -use crate::error::{Error, ErrorKind}; +use crate::error::Error; +use enum_primitive::FromPrimitive; /// The "bitmap accumulator" allows us to commit to a specific bitmap by splitting it into /// fragments and inserting these fragments into an MMR to produce an overall root hash. @@ -47,10 +50,12 @@ pub struct BitmapAccumulator { } impl BitmapAccumulator { + const NBITS: u64 = BitmapChunk::LEN_BITS as u64; + /// Create a new empty bitmap accumulator. pub fn new() -> BitmapAccumulator { BitmapAccumulator { - backend: VecBackend::new_hash_only(), + backend: VecBackend::new(), } } /// Find the start of the first "chunk" of 1024 bits from the provided idx. /// Zero the last 10 bits to round down to multiple of 1024. pub fn chunk_start_idx(idx: u64) -> u64 { - idx & !0x3ff + idx & !(Self::NBITS - 1) } /// The first 1024 belong to chunk 0, the next 1024 to chunk 1 etc. fn chunk_idx(idx: u64) -> u64 { - idx / 1024 + idx / Self::NBITS } /// Apply the provided idx iterator to our bitmap accumulator. @@ -88,12 +93,13 @@ let mut idx_iter = idx.into_iter().filter(|&x| x < size).peekable(); while let Some(x) = idx_iter.peek() { - if *x < chunk_idx * 1024 { + if *x < chunk_idx * Self::NBITS { + // NOTE we never get here if idx starts from from_idx // skip until we reach our first chunk idx_iter.next(); - } else if *x < (chunk_idx + 1) * 1024 { + } else if *x < (chunk_idx + 1) * Self::NBITS { let idx = idx_iter.next().expect("next after peek"); - chunk.set(idx % 1024, true); + chunk.set(idx % Self::NBITS, true); } else { self.append_chunk(chunk)?; chunk_idx += 1; @@ -121,6 +127,8 @@ /// If size is 1 then we will have a single chunk. /// If size is 1023 then we will have a single chunk (bits 0 to 1023 inclusive). /// If the size is 1024 then we will have two chunks. + /// TODO: first argument is an iterator for no good reason; + /// might as well pass from_idx as first argument pub fn apply<T, U>(&mut self, invalidated_idx: T, idx: U, size: u64) -> Result<(), Error> where T: IntoIterator<Item = u64>, U: IntoIterator<Item = u64>, @@ -146,10 +154,9 @@ let chunk_idx = BitmapAccumulator::chunk_idx(from_idx); let last_pos = self.backend.size(); let mut pmmr = PMMR::at(&mut self.backend, last_pos); - let chunk_pos = pmmr::insertion_to_pmmr_index(chunk_idx + 1); - let rewind_pos = chunk_pos.saturating_sub(1); + let rewind_pos = pmmr::insertion_to_pmmr_index(chunk_idx); pmmr.rewind(rewind_pos, &Bitmap::new()) - .map_err(|e| ErrorKind::Other(format!("pmmr rewind error, {}", e)))?; + .map_err(|e| Error::Other(format!("pmmr rewind error, {}", e)))?; Ok(()) } @@ -171,20 +178,35 @@ let last_pos = self.backend.size(); PMMR::at(&mut self.backend, last_pos) .push(&chunk) - .map_err(|e| { - ErrorKind::Other(format!("PMMR at for pos {} error, {}", last_pos, e)).into() - }) + .map_err(|e| Error::Other(format!("PMMR at for pos {} error, {}", last_pos, e))) } /// The root hash of the bitmap accumulator MMR. 
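// Standalone restatement of the chunk-indexing math described earlier in this
// file, with NBITS = 1024 bits per chunk as above:
const NBITS: u64 = 1024;
fn chunk_start_idx(idx: u64) -> u64 {
    idx & !(NBITS - 1) // clear the low 10 bits: round down to a multiple of 1024
}
fn chunk_idx(idx: u64) -> u64 {
    idx / NBITS // bits 0..=1023 are chunk 0, bits 1024..=2047 are chunk 1, ...
}
// e.g. chunk_start_idx(1500) == 1024 and chunk_idx(1500) == 1.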
pub fn root(&self) -> Hash { - ReadonlyPMMR::at(&self.backend, self.backend.size()).root() + self.readonly_pmmr().root().expect("no root, invalid tree") + } + + /// Readonly access to our internal data. + pub fn readonly_pmmr(&self) -> ReadonlyPMMR> { + ReadonlyPMMR::at(&self.backend, self.backend.size()) + } + + /// Return a raw in-memory bitmap of this accumulator + pub fn as_bitmap(&self) -> Result { + let mut bitmap = Bitmap::new(); + for (chunk_index, chunk_pos) in self.backend.leaf_pos_iter().enumerate() { + //TODO: Unwrap + let chunk = self.backend.get_data(chunk_pos as u64).unwrap(); + let additive = chunk.set_iter(chunk_index * 1024).collect::>(); + bitmap.add_many(&additive); + } + Ok(bitmap) } } /// A bitmap "chunk" representing 1024 contiguous bits of the overall bitmap. /// The first 1024 bits belong in one chunk. The next 1024 bits in the next chunk, etc. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct BitmapChunk(BitVec); impl BitmapChunk { @@ -209,6 +231,16 @@ impl BitmapChunk { pub fn any(&self) -> bool { self.0.any() } + + /// Iterator over the integer set represented by this chunk, applying the given + /// offset to the values + pub fn set_iter(&self, idx_offset: usize) -> impl Iterator + '_ { + self.0 + .iter() + .enumerate() + .filter(|(_, val)| *val) + .map(move |(idx, _)| (idx as u32 + idx_offset as u32)) + } } impl PMMRable for BitmapChunk { @@ -239,3 +271,338 @@ impl Readable for BitmapChunk { Ok(BitmapChunk::new()) } } + +/// +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct BitmapSegment { + identifier: SegmentIdentifier, + blocks: Vec, + proof: SegmentProof, +} + +impl Writeable for BitmapSegment { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + Writeable::write(&self.identifier, writer)?; + writer.write_u16(self.blocks.len() as u16)?; + for block in &self.blocks { + Writeable::write(block, writer)?; + } + Writeable::write(&self.proof, writer)?; + Ok(()) + } +} + +impl Readable for BitmapSegment { + fn read(reader: &mut R) -> Result { + let identifier: SegmentIdentifier = Readable::read(reader)?; + + let n_blocks = reader.read_u16()? 
as usize; + let mut blocks = Vec::::with_capacity(n_blocks); + for _ in 0..n_blocks { + blocks.push(Readable::read(reader)?); + } + let proof = Readable::read(reader)?; + + Ok(Self { + identifier, + blocks, + proof, + }) + } +} + +// TODO: this can be sped up with some `unsafe` code +impl From> for BitmapSegment { + fn from(segment: Segment) -> Self { + let (identifier, _, _, _, leaf_data, proof) = segment.parts(); + + let mut chunks_left = leaf_data.len(); + let mut blocks = + Vec::with_capacity((chunks_left + BitmapBlock::NCHUNKS - 1) / BitmapBlock::NCHUNKS); + while chunks_left > 0 { + let n_chunks = min(BitmapBlock::NCHUNKS, chunks_left); + chunks_left = chunks_left.saturating_sub(n_chunks); + blocks.push(BitmapBlock::new(n_chunks)); + } + + for (chunk_idx, chunk) in leaf_data.into_iter().enumerate() { + assert_eq!(chunk.0.len(), BitmapChunk::LEN_BITS); + let block = &mut blocks + .get_mut(chunk_idx / BitmapBlock::NCHUNKS) + .unwrap() + .inner; + let offset = (chunk_idx % BitmapBlock::NCHUNKS) * BitmapChunk::LEN_BITS; + for (i, _) in chunk.0.iter().enumerate().filter(|&(_, v)| v) { + block.set(offset + i, true); + } + } + + Self { + identifier, + blocks, + proof, + } + } +} + +// TODO: this can be sped up with some `unsafe` code +impl From for Segment { + fn from(segment: BitmapSegment) -> Self { + let BitmapSegment { + identifier, + blocks, + proof, + } = segment; + + // Count the number of chunks taking into account that the final block might be smaller + let n_chunks = (blocks.len() - 1) * BitmapBlock::NCHUNKS + + blocks.last().map(|b| b.n_chunks()).unwrap_or(0); + let mut leaf_pos = Vec::with_capacity(n_chunks); + let mut chunks = Vec::with_capacity(n_chunks); + let offset = (1 << identifier.height) * identifier.idx; + for i in 0..(n_chunks as u64) { + leaf_pos.push(pmmr::insertion_to_pmmr_index(offset + i)); + chunks.push(BitmapChunk::new()); + } + + for (block_idx, block) in blocks.into_iter().enumerate() { + assert!(block.inner.len() <= BitmapBlock::NBITS as usize); + let offset = block_idx * BitmapBlock::NCHUNKS; + for (i, _) in block.inner.iter().enumerate().filter(|&(_, v)| v) { + chunks + .get_mut(offset + i / BitmapChunk::LEN_BITS) + .unwrap() + .0 + .set(i % BitmapChunk::LEN_BITS, true); + } + } + + Segment::from_parts(identifier, Vec::new(), Vec::new(), leaf_pos, chunks, proof) + } +} + +/// A block of 2^16 bits that provides an efficient (de)serialization +/// depending on the bitmap occupancy. 
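// The struct that follows picks one of three encodings by occupancy: an index
// costs 2 bytes while a full 2^16-bit block costs 8 KiB raw, so below 4096 set
// (or clear) bits the index form wins. A size model matching those thresholds:
const BLOCK_NBITS: u32 = 1 << 16;
fn pick_mode(count_pos: u32, len_bits: u32) -> &'static str {
    let count_neg = len_bits - count_pos;
    let threshold = BLOCK_NBITS / 16; // 4096: 2 * 4096 bytes equals len_bits / 8 for a full block
    if count_pos < threshold {
        "Positive" // store only the positions of set bits
    } else if count_neg < threshold {
        "Negative" // store only the positions of clear bits
    } else {
        "Raw" // store all len_bits / 8 bytes verbatim
    }
}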
+#[derive(Clone, Debug, PartialEq, Eq)] +struct BitmapBlock { + inner: BitVec, +} + +impl BitmapBlock { + /// Maximum number of bits in a block + const NBITS: u32 = 1 << 16; + /// Maximum number of chunks in a block + const NCHUNKS: usize = Self::NBITS as usize / BitmapChunk::LEN_BITS; + + fn new(n_chunks: usize) -> Self { + assert!(n_chunks <= BitmapBlock::NCHUNKS); + Self { + inner: BitVec::from_elem(n_chunks * BitmapChunk::LEN_BITS, false), + } + } + + fn n_chunks(&self) -> usize { + let length = self.inner.len(); + assert_eq!(length % BitmapChunk::LEN_BITS, 0); + let n_chunks = length / BitmapChunk::LEN_BITS; + assert!(n_chunks <= BitmapBlock::NCHUNKS); + n_chunks + } +} + +impl Writeable for BitmapBlock { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + let length = self.inner.len(); + assert!(length <= Self::NBITS as usize); + assert_eq!(length % BitmapChunk::LEN_BITS, 0); + writer.write_u8((length / BitmapChunk::LEN_BITS) as u8)?; + + let count_pos = self.inner.iter().filter(|&v| v).count() as u32; + + // Negative count needs to be adjusted if the block is not full, + // which affects the choice of serialization mode and size written + let count_neg = length as u32 - count_pos; + + let threshold = Self::NBITS / 16; + if count_pos < threshold { + // Write positive indices + Writeable::write(&BitmapBlockSerialization::Positive, writer)?; + writer.write_u16(count_pos as u16)?; + for (i, _) in self.inner.iter().enumerate().filter(|&(_, v)| v) { + writer.write_u16(i as u16)?; + } + } else if count_neg < threshold { + // Write negative indices + Writeable::write(&BitmapBlockSerialization::Negative, writer)?; + writer.write_u16(count_neg as u16)?; + for (i, _) in self.inner.iter().enumerate().filter(|&(_, v)| !v) { + writer.write_u16(i as u16)?; + } + } else { + // Write raw bytes + Writeable::write(&BitmapBlockSerialization::Raw, writer)?; + let bytes = self.inner.to_bytes(); + assert!(bytes.len() <= Self::NBITS as usize / 8); + writer.write_fixed_bytes(&bytes)?; + } + + Ok(()) + } +} + +impl Readable for BitmapBlock { + fn read(reader: &mut R) -> Result { + let n_chunks = reader.read_u8()?; + if n_chunks as usize > BitmapBlock::NCHUNKS { + return Err(ser::Error::TooLargeReadErr(format!( + "Requested {} chunks, limit is {}", + n_chunks, + BitmapBlock::NCHUNKS + ))); + } + let n_bits = n_chunks as usize * BitmapChunk::LEN_BITS; + + let mode = Readable::read(reader)?; + let inner = match mode { + BitmapBlockSerialization::Raw => { + // Raw bytes + let bytes = reader.read_fixed_bytes(n_bits / 8)?; + BitVec::from_bytes(&bytes) + } + BitmapBlockSerialization::Positive => { + // Positive indices + let mut inner = BitVec::from_elem(n_bits, false); + let n = reader.read_u16()?; + for _ in 0..n { + inner.set(reader.read_u16()? as usize, true); + } + inner + } + BitmapBlockSerialization::Negative => { + // Negative indices + let mut inner = BitVec::from_elem(n_bits, true); + let n = reader.read_u16()?; + for _ in 0..n { + inner.set(reader.read_u16()? as usize, false); + } + inner + } + }; + + Ok(BitmapBlock { inner }) + } +} + +enum_from_primitive! 
{ + #[derive(Debug, Clone, Copy, PartialEq)] + #[repr(u8)] + enum BitmapBlockSerialization { + Raw = 0, + Positive = 1, + Negative = 2, + } +} + +impl Writeable for BitmapBlockSerialization { + fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { + writer.write_u8(*self as u8) + } +} + +impl Readable for BitmapBlockSerialization { + fn read<R: Reader>(reader: &mut R) -> Result<Self, ser::Error> { + Self::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData(format!( + "Failed to read the next byte" + ))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::core::ser::{ + BinReader, BinWriter, DeserializationMode, ProtocolVersion, Readable, Writeable, + }; + use byteorder::ReadBytesExt; + use grin_util::secp::rand::Rng; + use rand::thread_rng; + use std::io::Cursor; + + fn test_roundtrip(entries: usize, inverse: bool, encoding: u8, length: usize, n_blocks: usize) { + let mut rng = thread_rng(); + let mut block = BitmapBlock::new(n_blocks); + if inverse { + block.inner.negate(); + } + + let range_size = n_blocks * BitmapChunk::LEN_BITS as usize; + + // Flip `entries` bits in random spots + let mut count = 0; + while count < entries { + let idx = rng.gen_range(0, range_size); + if block.inner.get(idx).unwrap() == inverse { + count += 1; + block.inner.set(idx, !inverse); + } + } + + // Serialize + let mut cursor = Cursor::new(Vec::<u8>::new()); + let mut writer = BinWriter::new(&mut cursor, ProtocolVersion(1)); + Writeable::write(&block, &mut writer).unwrap(); + + // Check encoding type and length + cursor.set_position(1); + assert_eq!(cursor.read_u8().unwrap(), encoding); + let actual_length = cursor.get_ref().len(); + assert_eq!(actual_length, length); + assert!(actual_length <= 2 + BitmapBlock::NBITS as usize / 8); + + // Deserialize + cursor.set_position(0); + let mut reader = BinReader::new( + &mut cursor, + ProtocolVersion(1), + DeserializationMode::default(), + ); + let block2: BitmapBlock = Readable::read(&mut reader).unwrap(); + assert_eq!(block, block2); + } + + #[test] + fn block_ser_roundtrip() { + let threshold = BitmapBlock::NBITS as usize / 16; + let entries = thread_rng().gen_range(threshold, 4 * threshold); + test_roundtrip(entries, false, 0, 2 + BitmapBlock::NBITS as usize / 8, 64); + test_roundtrip(entries, true, 0, 2 + BitmapBlock::NBITS as usize / 8, 64); + } + + #[test] + fn sparse_block_ser_roundtrip() { + let entries = + thread_rng().gen_range(BitmapChunk::LEN_BITS, BitmapBlock::NBITS as usize / 16); + test_roundtrip(entries, false, 1, 4 + 2 * entries, 64); + } + + #[test] + fn sparse_unfull_block_ser_roundtrip() { + let entries = + thread_rng().gen_range(BitmapChunk::LEN_BITS, BitmapBlock::NBITS as usize / 16); + test_roundtrip(entries, false, 1, 4 + 2 * entries, 61); + } + + #[test] + fn abundant_block_ser_roundtrip() { + let entries = + thread_rng().gen_range(BitmapChunk::LEN_BITS, BitmapBlock::NBITS as usize / 16); + test_roundtrip(entries, true, 2, 4 + 2 * entries, 64); + } + + #[test] + fn abundant_unfull_block_ser_roundtrip() { + let entries = + thread_rng().gen_range(BitmapChunk::LEN_BITS, BitmapBlock::NBITS as usize / 16); + test_roundtrip(entries, true, 2, 4 + 2 * entries, 61); + } +} diff --git a/chain/src/txhashset/desegmenter.rs b/chain/src/txhashset/desegmenter.rs new file mode 100644 index 0000000000..fafb204098 --- /dev/null +++ b/chain/src/txhashset/desegmenter.rs @@ -0,0 +1,1018 @@ +// Copyright 2021 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Manages the reconstitution of a txhashset from segments produced by the +//! segmenter + +use std::sync::Arc; + +use crate::core::core::hash::{Hash, Hashed}; +use crate::core::core::{pmmr, pmmr::ReadablePMMR}; +use crate::core::core::{ + BlockHeader, BlockSums, OutputIdentifier, Segment, SegmentIdentifier, SegmentType, + SegmentTypeIdentifier, TxKernel, +}; +use crate::error::Error; +use crate::txhashset::{BitmapAccumulator, BitmapChunk, TxHashSet}; +use crate::types::{Tip, TxHashsetWriteStatus}; +use crate::util::secp::pedersen::RangeProof; +use crate::util::{RwLock, StopState}; +use crate::SyncState; + +use crate::pibd_params; +use crate::store; +use crate::txhashset; + +use croaring::Bitmap; + +/// Desegmenter for rebuilding a txhashset from PIBD segments +#[derive(Clone)] +pub struct Desegmenter { + txhashset: Arc<RwLock<TxHashSet>>, + header_pmmr: Arc<RwLock<txhashset::PMMRHandle<BlockHeader>>>, + archive_header: BlockHeader, + bitmap_root_hash: Hash, // bitmap root hash must come as a result of handshake process + store: Arc<store::ChainStore>, + + genesis: BlockHeader, + + default_bitmap_segment_height: u8, + default_output_segment_height: u8, + default_rangeproof_segment_height: u8, + default_kernel_segment_height: u8, + + bitmap_accumulator: BitmapAccumulator, + bitmap_segment_cache: Vec<Segment<BitmapChunk>>, + output_segment_cache: Vec<Segment<OutputIdentifier>>, + rangeproof_segment_cache: Vec<Segment<RangeProof>>, + kernel_segment_cache: Vec<Segment<TxKernel>>, + + bitmap_mmr_leaf_count: u64, + bitmap_mmr_size: u64, + + /// Maximum number of segments to cache before we stop requesting others + max_cached_segments: usize, + + /// In-memory 'raw' bitmap corresponding to contents of bitmap accumulator + bitmap_cache: Option<Bitmap>, + + /// Flag indicating there are no more segments to request + all_segments_complete: bool, + + latest_block_height: u64, +} + +impl Desegmenter { + /// Create a new desegmenter based on the provided txhashset and the specified block header + pub fn new( + txhashset: Arc<RwLock<TxHashSet>>, + header_pmmr: Arc<RwLock<txhashset::PMMRHandle<BlockHeader>>>, + archive_header: BlockHeader, + bitmap_root_hash: Hash, + genesis: BlockHeader, + store: Arc<store::ChainStore>, + ) -> Desegmenter { + info!( + "Creating new desegmenter for bitmap_root_hash {}, height {}", + bitmap_root_hash, archive_header.height + ); + let mut retval = Desegmenter { + txhashset, + header_pmmr, + archive_header, + bitmap_root_hash, + store, + genesis, + bitmap_accumulator: BitmapAccumulator::new(), + default_bitmap_segment_height: pibd_params::BITMAP_SEGMENT_HEIGHT, + default_output_segment_height: pibd_params::OUTPUT_SEGMENT_HEIGHT, + default_rangeproof_segment_height: pibd_params::RANGEPROOF_SEGMENT_HEIGHT, + default_kernel_segment_height: pibd_params::KERNEL_SEGMENT_HEIGHT, + bitmap_segment_cache: vec![], + output_segment_cache: vec![], + rangeproof_segment_cache: vec![], + kernel_segment_cache: vec![], + + bitmap_mmr_leaf_count: 0, + bitmap_mmr_size: 0, + + max_cached_segments: pibd_params::MAX_CACHED_SEGMENTS, + + bitmap_cache: None, + + all_segments_complete: false, + + latest_block_height: 0, + }; + retval.calc_bitmap_mmr_sizes(); + retval + } + + /// Reset all state + pub fn reset(&mut self) { + self.all_segments_complete = false; + self.bitmap_segment_cache = vec![]; + self.output_segment_cache = 
vec![]; + self.rangeproof_segment_cache = vec![]; + self.kernel_segment_cache = vec![]; + self.bitmap_mmr_leaf_count = 0; + self.bitmap_mmr_size = 0; + self.bitmap_cache = None; + self.bitmap_accumulator = BitmapAccumulator::new(); + self.latest_block_height = 0; + self.calc_bitmap_mmr_sizes(); + } + + /// Return reference to the header used for validation + pub fn header(&self) -> &BlockHeader { + &self.archive_header + } + + /// Return size of bitmap mmr + pub fn expected_bitmap_mmr_size(&self) -> u64 { + self.bitmap_mmr_size + } + + /// Whether we have all the segments we need + pub fn is_complete(&self) -> bool { + self.all_segments_complete + } + + /// Check progress, update status if needed, returns true if all required + /// segments are in place + pub fn check_progress(&mut self, status: Arc) -> Result { + let local_output_mmr_size; + let local_kernel_mmr_size; + let local_rangeproof_mmr_size; + { + let txhashset = self.txhashset.read(); + local_output_mmr_size = txhashset.output_mmr_size(); + local_kernel_mmr_size = txhashset.kernel_mmr_size(); + local_rangeproof_mmr_size = txhashset.rangeproof_mmr_size(); + } + + // going to try presenting PIBD progress as total leaves downloaded + // total segments probably doesn't make much sense since the segment + // sizes will be able to change over time, and representative block height + // can be too lopsided if one pmmr completes faster, so perhaps just + // use total leaves downloaded and display as a percentage + let completed_leaves = pmmr::n_leaves(local_output_mmr_size) + + pmmr::n_leaves(local_rangeproof_mmr_size) + + pmmr::n_leaves(local_kernel_mmr_size); + + // Find latest 'complete' header. + // First take lesser of rangeproof and output mmr sizes + let latest_output_size = std::cmp::min(local_output_mmr_size, local_rangeproof_mmr_size); + + // Find first header in which 'output_mmr_size' and 'kernel_mmr_size' are greater than + // given sizes + + let res = { + let header_pmmr = self.header_pmmr.read(); + header_pmmr.get_first_header_with( + latest_output_size, + local_kernel_mmr_size, + self.latest_block_height, + self.store.clone(), + ) + }; + + if let Some(h) = res { + self.latest_block_height = h.height; + + // TODO: Unwraps + let tip = Tip::from_header(&h); + let batch = self.store.batch()?; + batch.save_pibd_head(&tip)?; + batch.commit()?; + + status.update_pibd_progress( + false, + false, + completed_leaves, + self.latest_block_height, + &self.archive_header, + ); + if local_kernel_mmr_size == self.archive_header.kernel_mmr_size + && local_output_mmr_size == self.archive_header.output_mmr_size + && local_rangeproof_mmr_size == self.archive_header.output_mmr_size + && self.bitmap_cache.is_some() + { + // All is complete + return Ok(true); + } + } + + Ok(false) + } + + /// Once the PIBD set is downloaded, we need to ensure that the respective leaf sets + /// match the bitmap (particularly in the case of outputs being spent after a PIBD catch-up) + pub fn check_update_leaf_set_state(&self) -> Result<(), Error> { + let mut header_pmmr = self.header_pmmr.write(); + let mut txhashset = self.txhashset.write(); + let mut _batch = self.store.batch()?; + txhashset::extending(&mut header_pmmr, &mut txhashset, &mut _batch, |ext, _| { + let extension = &mut ext.extension; + if let Some(b) = &self.bitmap_cache { + extension.update_leaf_sets(&b)?; + } + Ok(()) + })?; + Ok(()) + } + + /// This is largely copied from chain.rs txhashset_write and related functions, + /// the idea being that the txhashset version will eventually be removed + 
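// Condensed restatement of the completion test in check_progress above: PIBD
// is only "done" once every local MMR has reached the size committed to by the
// archive header and the raw bitmap has been rebuilt from its segments.
fn pibd_complete(
    local_output: u64,
    local_rangeproof: u64,
    local_kernel: u64,
    archive_output_mmr_size: u64,
    archive_kernel_mmr_size: u64,
    bitmap_rebuilt: bool,
) -> bool {
    local_kernel == archive_kernel_mmr_size
        && local_output == archive_output_mmr_size
        // the rangeproof MMR mirrors the output MMR, hence the same target size
        && local_rangeproof == archive_output_mmr_size
        && bitmap_rebuilt
}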
pub fn validate_complete_state( + &self, + status: Arc<SyncState>, + stop_state: Arc<StopState>, + ) -> Result<(), Error> { + // Quick root check first: + { + let txhashset = self.txhashset.read(); + txhashset.roots()?.validate(&self.archive_header)?; + } + + // TODO: Possibly keep track of this in the DB so we can pick up where we left off if needed + let last_rangeproof_validation_pos = 0; + + // Validate kernel history + { + debug!("desegmenter validation: rewinding and validating kernel history (readonly)"); + let txhashset = self.txhashset.read(); + let mut count = 0; + let mut current = self.archive_header.clone(); + let total = current.height; + txhashset::rewindable_kernel_view(&txhashset, |view, batch| { + while current.height > 0 { + view.rewind(&current)?; + view.validate_root()?; + current = batch.get_previous_header(&current)?; + count += 1; + if current.height % 100000 == 0 || current.height == total { + status.on_setup(Some(total - current.height), Some(total), None, None); + } + if stop_state.is_stopped() { + return Ok(()); + } + } + Ok(()) + })?; + debug!( + "desegmenter validation: validated kernel root on {} headers", + count, + ); + } + + if stop_state.is_stopped() { + return Ok(()); + } + + // Check kernel MMR root for every block header. + // Check NRD relative height rules for full kernel history. + + { + let header_pmmr = self.header_pmmr.read(); + let txhashset = self.txhashset.read(); + let batch = self.store.batch()?; + txhashset.verify_kernel_pos_index( + &self.genesis, + &header_pmmr, + &batch, + Some(status.clone()), + Some(stop_state.clone()), + )?; + } + + if stop_state.is_stopped() { + return Ok(()); + } + + status.on_setup(None, None, None, None); + // Prepare a new batch and update all the required records + { + debug!("desegmenter validation: rewinding a 2nd time (writeable)"); + let mut header_pmmr = self.header_pmmr.write(); + let mut txhashset = self.txhashset.write(); + let mut batch = self.store.batch()?; + txhashset::extending( + &mut header_pmmr, + &mut txhashset, + &mut batch, + |ext, batch| { + let extension = &mut ext.extension; + extension.rewind(&self.archive_header, batch)?; + + // Validate the extension, generating the utxo_sum and kernel_sum. + // Full validation, including rangeproofs and kernel signature verification. + let (utxo_sum, kernel_sum) = extension.validate( + &self.genesis, + false, + &*status, + Some(last_rangeproof_validation_pos), + None, + &self.archive_header, + Some(stop_state.clone()), + )?; + + if stop_state.is_stopped() { + return Ok(()); + } + + // Save the block_sums (utxo_sum, kernel_sum) to the db for use later. + batch.save_block_sums( + &self.archive_header.hash(), + BlockSums { + utxo_sum, + kernel_sum, + }, + )?; + + Ok(()) + }, + )?; + + if stop_state.is_stopped() { + return Ok(()); + } + + debug!("desegmenter_validation: finished validating and rebuilding"); + status.on_save(); + + { + // Save the new head to the db and rebuild the header by height index. + let tip = Tip::from_header(&self.archive_header); + + batch.save_body_head(&tip)?; + + // Reset the body tail to the body head after a txhashset write + batch.save_body_tail(&tip)?; + } + + // Rebuild our output_pos index in the db based on fresh UTXO set. + txhashset.init_output_pos_index(&header_pmmr, &batch)?; + + // Rebuild our NRD kernel_pos index based on recent kernel history. + txhashset.init_recent_kernel_pos_index(&header_pmmr, &batch)?; + + // Commit all the changes to the db. 
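+ // A sketch of the batch semantics assumed throughout this function: every write + // above (block sums, body head/tail, pos indexes) was staged on this single + // `batch`, and none of it is visible to readers until the commit below, e.g.: + // let batch = store.batch()?; // open a db transaction + // batch.save_body_head(&tip)?; // staged only, not yet visible + // batch.commit()?; // published atomically; a crash before + // // this point leaves the old head intact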
+ batch.commit()?; + + debug!("desegmenter_validation: finished committing the batch (head etc.)"); + + status.on_done(); + } + Ok(()) + } + + /// Apply next set of segments that are ready to be appended to their respective trees, + /// and kick off any validations that can happen. + pub fn apply_next_segments(&mut self) -> Result<(), Error> { + let next_bmp_idx = self.next_required_bitmap_segment_index(); + if let Some(bmp_idx) = next_bmp_idx { + if let Some((idx, _seg)) = self + .bitmap_segment_cache + .iter() + .enumerate() + .find(|s| s.1.identifier().idx == bmp_idx) + { + self.apply_bitmap_segment(idx)?; + } + } else { + // Check if we need to finalize bitmap + if self.bitmap_cache == None { + // Should have all the pieces now, finalize the bitmap cache + self.finalize_bitmap()?; + } + + // Check if we can apply the next output segment(s) + if let Some(next_output_idx) = self.next_required_output_segment_index() { + if let Some((idx, _seg)) = self + .output_segment_cache + .iter() + .enumerate() + .find(|s| s.1.identifier().idx == next_output_idx) + { + self.apply_output_segment(idx)?; + } + } else { + if self.output_segment_cache.len() >= self.max_cached_segments { + self.output_segment_cache = vec![]; + } + } + // Check if we can apply the next rangeproof segment + if let Some(next_rp_idx) = self.next_required_rangeproof_segment_index() { + if let Some((idx, _seg)) = self + .rangeproof_segment_cache + .iter() + .enumerate() + .find(|s| s.1.identifier().idx == next_rp_idx) + { + self.apply_rangeproof_segment(idx)?; + } + } else { + if self.rangeproof_segment_cache.len() >= self.max_cached_segments { + self.rangeproof_segment_cache = vec![]; + } + } + // Check if we can apply the next kernel segment + if let Some(next_kernel_idx) = self.next_required_kernel_segment_index() { + if let Some((idx, _seg)) = self + .kernel_segment_cache + .iter() + .enumerate() + .find(|s| s.1.identifier().idx == next_kernel_idx) + { + self.apply_kernel_segment(idx)?; + } + } else { + if self.kernel_segment_cache.len() >= self.max_cached_segments { + self.kernel_segment_cache = vec![]; + } + } + } + Ok(()) + } + + /// Return list of the next preferred segments the desegmenter needs based on + /// the current real state of the underlying elements + pub fn next_desired_segments( + &mut self, + max_elements: usize, + ) -> Result, Error> { + let mut return_vec = vec![]; + // First check for required bitmap elements + if self.bitmap_cache.is_none() { + // Get current size of bitmap MMR + let local_pmmr_size = self.bitmap_accumulator.readonly_pmmr().unpruned_size(); + // Get iterator over expected bitmap elements + let mut identifier_iter = SegmentIdentifier::traversal_iter( + self.bitmap_mmr_size, + self.default_bitmap_segment_height, + ); + // Advance iterator to next expected segment + while let Some(id) = identifier_iter.next() { + if id.segment_pos_range(self.bitmap_mmr_size).1 > local_pmmr_size { + if !self.has_bitmap_segment_with_id(id) { + return_vec.push(SegmentTypeIdentifier::new(SegmentType::Bitmap, id)); + if return_vec.len() >= max_elements { + return Ok(return_vec); + } + } + } + } + } else { + // We have all required bitmap segments and have recreated our local + // bitmap, now continue with other segments, evenly spreading requests + // among MMRs + let local_output_mmr_size; + let local_kernel_mmr_size; + let local_rangeproof_mmr_size; + { + let txhashset = self.txhashset.read(); + local_output_mmr_size = txhashset.output_mmr_size(); + local_kernel_mmr_size = txhashset.kernel_mmr_size(); + 
local_rangeproof_mmr_size = txhashset.rangeproof_mmr_size(); + } + // TODO: fix; this alternative approach is very inefficient + let mut output_identifier_iter = SegmentIdentifier::traversal_iter( + self.archive_header.output_mmr_size, + self.default_output_segment_height, + ); + + let mut elems_added = 0; + while let Some(output_id) = output_identifier_iter.next() { + // Advance output iterator to next needed position + let (_first, last) = + output_id.segment_pos_range(self.archive_header.output_mmr_size); + if last <= local_output_mmr_size { + continue; + } + if self.output_segment_cache.len() >= self.max_cached_segments { + break; + } + if !self.has_output_segment_with_id(output_id) { + return_vec.push(SegmentTypeIdentifier::new(SegmentType::Output, output_id)); + elems_added += 1; + } + if elems_added == max_elements / 3 { + break; + } + } + + let mut rangeproof_identifier_iter = SegmentIdentifier::traversal_iter( + self.archive_header.output_mmr_size, + self.default_rangeproof_segment_height, + ); + + elems_added = 0; + while let Some(rp_id) = rangeproof_identifier_iter.next() { + let (_first, last) = rp_id.segment_pos_range(self.archive_header.output_mmr_size); + // Advance rangeproof iterator to next needed position + if last <= local_rangeproof_mmr_size { + continue; + } + if self.rangeproof_segment_cache.len() >= self.max_cached_segments { + break; + } + if !self.has_rangeproof_segment_with_id(rp_id) { + return_vec.push(SegmentTypeIdentifier::new(SegmentType::RangeProof, rp_id)); + elems_added += 1; + } + if elems_added == max_elements / 3 { + break; + } + } + + let mut kernel_identifier_iter = SegmentIdentifier::traversal_iter( + self.archive_header.kernel_mmr_size, + self.default_kernel_segment_height, + ); + + elems_added = 0; + while let Some(k_id) = kernel_identifier_iter.next() { + // Advance kernel iterator to next needed position + let (_first, last) = k_id.segment_pos_range(self.archive_header.kernel_mmr_size); + if last <= local_kernel_mmr_size { + continue; + } + if self.kernel_segment_cache.len() >= self.max_cached_segments { + break; + } + if !self.has_kernel_segment_with_id(k_id) { + return_vec.push(SegmentTypeIdentifier::new(SegmentType::Kernel, k_id)); + elems_added += 1; + } + if elems_added == max_elements / 3 { + break; + } + } + } + if return_vec.is_empty() && self.bitmap_cache.is_some() { + self.all_segments_complete = true; + } + Ok(return_vec) + } + + /// 'Finalize' the bitmap accumulator, storing an in-memory copy of the bitmap for + /// use in further validation and setting the accumulator on the underlying txhashset + pub fn finalize_bitmap(&mut self) -> Result<(), Error> { + trace!( + "pibd_desegmenter: finalizing and caching bitmap - accumulator root: {}", + self.bitmap_accumulator.root() + ); + self.bitmap_cache = Some(self.bitmap_accumulator.as_bitmap()?); + + // Set the txhashset's bitmap accumulator + let mut header_pmmr = self.header_pmmr.write(); + let mut txhashset = self.txhashset.write(); + let mut batch = self.store.batch()?; + txhashset::extending( + &mut header_pmmr, + &mut txhashset, + &mut batch, + |ext, _batch| { + let extension = &mut ext.extension; + extension.set_bitmap_accumulator(self.bitmap_accumulator.clone()); + Ok(()) + }, + )?; + Ok(()) + } + + // Calculate and store number of leaves and positions in the bitmap mmr given the number of + // outputs specified in the header. 
Should be called whenever the header changes + fn calc_bitmap_mmr_sizes(&mut self) { + // Number of leaves (BitmapChunks) + self.bitmap_mmr_leaf_count = + (pmmr::n_leaves(self.archive_header.output_mmr_size) + 1023) / 1024; + trace!( + "pibd_desegmenter - expected number of leaves in bitmap MMR: {}", + self.bitmap_mmr_leaf_count + ); + // Total size of Bitmap PMMR + self.bitmap_mmr_size = + 1 + pmmr::peaks(pmmr::insertion_to_pmmr_index(self.bitmap_mmr_leaf_count)) + .last() + .unwrap_or( + &(pmmr::peaks(pmmr::insertion_to_pmmr_index( + self.bitmap_mmr_leaf_count - 1, + )) + .last() + .unwrap()), + ) + .clone(); + + trace!( + "pibd_desegmenter - expected size of bitmap MMR: {}", + self.bitmap_mmr_size + ); + } + + /// Cache a bitmap segment if we don't already have it + fn cache_bitmap_segment(&mut self, in_seg: Segment<BitmapChunk>) { + if self + .bitmap_segment_cache + .iter() + .find(|i| i.identifier() == in_seg.identifier()) + .is_none() + { + self.bitmap_segment_cache.push(in_seg); + } + } + + /// Whether our list already contains this bitmap segment + fn has_bitmap_segment_with_id(&self, seg_id: SegmentIdentifier) -> bool { + self.bitmap_segment_cache + .iter() + .find(|i| i.identifier() == seg_id) + .is_some() + } + + /// Return an identifier for the next segment we need for the bitmap pmmr + fn next_required_bitmap_segment_index(&self) -> Option<u64> { + let local_bitmap_pmmr_size = self.bitmap_accumulator.readonly_pmmr().unpruned_size(); + let cur_segment_count = SegmentIdentifier::count_segments_required( + local_bitmap_pmmr_size, + self.default_bitmap_segment_height, + ); + let total_segment_count = SegmentIdentifier::count_segments_required( + self.bitmap_mmr_size, + self.default_bitmap_segment_height, + ); + if cur_segment_count == total_segment_count { + None + } else { + Some(cur_segment_count as u64) + } + } + + /// Adds and validates a bitmap chunk + pub fn add_bitmap_segment( + &mut self, + segment: Segment<BitmapChunk>, + bitmap_root_hash: Hash, + ) -> Result<(), Error> { + if bitmap_root_hash != self.bitmap_root_hash { + return Err(Error::InvalidBitmapRoot); + } + + trace!("pibd_desegmenter: add bitmap segment"); + segment.validate( + self.bitmap_mmr_size, // Last MMR pos at the height being validated, in this case of the bitmap root + None, + self.bitmap_root_hash, + )?; + trace!("pibd_desegmenter: adding segment to cache"); + // All okay, add to our cached list of bitmap segments + self.cache_bitmap_segment(segment); + Ok(()) + } + + /// Apply the bitmap segment at the given index in the cache + pub fn apply_bitmap_segment(&mut self, idx: usize) -> Result<(), Error> { + let segment = self.bitmap_segment_cache.remove(idx); + trace!( + "pibd_desegmenter: apply bitmap segment at segment idx {}", + segment.identifier().idx + ); + // Add leaves to bitmap accumulator + let (_sid, _hash_pos, _hashes, _leaf_pos, leaf_data, _proof) = segment.parts(); + for chunk in leaf_data.into_iter() { + self.bitmap_accumulator.append_chunk(chunk)?; + } + Ok(()) + } + + /// Whether our list already contains this output segment + fn has_output_segment_with_id(&self, seg_id: SegmentIdentifier) -> bool { + self.output_segment_cache + .iter() + .find(|i| i.identifier() == seg_id) + .is_some() + } + + /// Cache an output segment if we don't already have it + fn cache_output_segment(&mut self, in_seg: Segment<OutputIdentifier>) { + if self + .output_segment_cache + .iter() + .find(|i| i.identifier() == in_seg.identifier()) + .is_none() + { + self.output_segment_cache.push(in_seg); + } + } + + /// Apply the output segment at the given index in the cache + pub fn 
apply_output_segment(&mut self, idx: usize) -> Result<(), Error> { + let segment = self.output_segment_cache.remove(idx); + trace!( + "pibd_desegmenter: applying output segment at segment idx {}", + segment.identifier().idx + ); + let mut header_pmmr = self.header_pmmr.write(); + let mut txhashset = self.txhashset.write(); + let mut batch = self.store.batch()?; + txhashset::extending( + &mut header_pmmr, + &mut txhashset, + &mut batch, + |ext, _batch| { + let extension = &mut ext.extension; + extension.apply_output_segment(segment)?; + Ok(()) + }, + )?; + Ok(()) + } + + /// Return an identifier for the next segment we need for the output pmmr + fn next_required_output_segment_index(&self) -> Option<u64> { + let local_output_mmr_size; + { + let txhashset = self.txhashset.read(); + local_output_mmr_size = txhashset.output_mmr_size(); + } + + // Special case here. If the mmr size is 1, this is a fresh chain + // with naught but a humble genesis block. We need segment 0 (and + // also need to skip the genesis block when applying the segment). + // Note this is implementation-specific: the code for creating + // a new chain creates the genesis block pmmr entries by default + + let mut cur_segment_count = if local_output_mmr_size == 1 { + 0 + } else { + SegmentIdentifier::count_segments_required( + local_output_mmr_size, + self.default_output_segment_height, + ) + }; + + // When resuming, we need to ensure we're getting the previous segment if needed + let theoretical_pmmr_size = + SegmentIdentifier::pmmr_size(cur_segment_count, self.default_output_segment_height); + if local_output_mmr_size < theoretical_pmmr_size { + cur_segment_count -= 1; + } + + let total_segment_count = SegmentIdentifier::count_segments_required( + self.archive_header.output_mmr_size, + self.default_output_segment_height, + ); + trace!( + "Next required output segment is {} of {}", + cur_segment_count, + total_segment_count + ); + if cur_segment_count == total_segment_count { + None + } else { + Some(cur_segment_count as u64) + } + } + + /// Adds an output segment + pub fn add_output_segment( + &mut self, + segment: Segment<OutputIdentifier>, + bitmap_root_hash: Hash, + ) -> Result<(), Error> { + if bitmap_root_hash != self.bitmap_root_hash { + return Err(Error::InvalidBitmapRoot); + } + + trace!("pibd_desegmenter: add output segment"); + segment.validate( + self.archive_header.output_mmr_size, // Last MMR pos at the height being validated + self.bitmap_cache.as_ref(), + self.archive_header.output_root, // Output root we're checking for + )?; + self.cache_output_segment(segment); + Ok(()) + } + + /// Whether our list already contains this rangeproof segment + fn has_rangeproof_segment_with_id(&self, seg_id: SegmentIdentifier) -> bool { + self.rangeproof_segment_cache + .iter() + .find(|i| i.identifier() == seg_id) + .is_some() + } + + /// Cache a RangeProof segment if we don't already have it + fn cache_rangeproof_segment(&mut self, in_seg: Segment<RangeProof>) { + if self + .rangeproof_segment_cache + .iter() + .find(|i| i.identifier() == in_seg.identifier()) + .is_none() + { + self.rangeproof_segment_cache.push(in_seg); + } + } + + /// Apply the rangeproof segment at the given index in the cache + pub fn apply_rangeproof_segment(&mut self, idx: usize) -> Result<(), Error> { + let segment = self.rangeproof_segment_cache.remove(idx); + trace!( + "pibd_desegmenter: applying rangeproof segment at segment idx {}", + segment.identifier().idx + ); + let mut header_pmmr = self.header_pmmr.write(); + let mut txhashset = self.txhashset.write(); + let mut batch = 
self.store.batch()?; + txhashset::extending( + &mut header_pmmr, + &mut txhashset, + &mut batch, + |ext, _batch| { + let extension = &mut ext.extension; + extension.apply_rangeproof_segment(segment)?; + Ok(()) + }, + )?; + Ok(()) + } + + /// Return an identifier for the next segment we need for the rangeproof pmmr + fn next_required_rangeproof_segment_index(&self) -> Option { + let local_rangeproof_mmr_size; + { + let txhashset = self.txhashset.read(); + local_rangeproof_mmr_size = txhashset.rangeproof_mmr_size(); + } + + // Special case here. If the mmr size is 1, this is a fresh chain + // with naught but a humble genesis block. We need segment 0, (and + // also need to skip the genesis block when applying the segment) + + let mut cur_segment_count = if local_rangeproof_mmr_size == 1 { + 0 + } else { + SegmentIdentifier::count_segments_required( + local_rangeproof_mmr_size, + self.default_rangeproof_segment_height, + ) + }; + + // When resuming, we need to ensure we're getting the previous segment if needed + let theoretical_pmmr_size = + SegmentIdentifier::pmmr_size(cur_segment_count, self.default_rangeproof_segment_height); + if local_rangeproof_mmr_size < theoretical_pmmr_size { + cur_segment_count -= 1; + } + + let total_segment_count = SegmentIdentifier::count_segments_required( + self.archive_header.output_mmr_size, + self.default_rangeproof_segment_height, + ); + trace!( + "Next required rangeproof segment is {} of {}", + cur_segment_count, + total_segment_count + ); + if cur_segment_count == total_segment_count { + None + } else { + Some(cur_segment_count as u64) + } + } + + /// Adds a Rangeproof segment + pub fn add_rangeproof_segment( + &mut self, + segment: Segment, + bitmap_root_hash: Hash, + ) -> Result<(), Error> { + if bitmap_root_hash != self.bitmap_root_hash { + return Err(Error::InvalidBitmapRoot); + } + + trace!("pibd_desegmenter: add rangeproof segment"); + segment.validate( + self.archive_header.output_mmr_size, // Last MMR pos at the height being validated + self.bitmap_cache.as_ref(), + self.archive_header.range_proof_root, // Range proof root we're checking for + )?; + self.cache_rangeproof_segment(segment); + Ok(()) + } + + /// Whether our list already contains this kernel segment + fn has_kernel_segment_with_id(&self, seg_id: SegmentIdentifier) -> bool { + self.kernel_segment_cache + .iter() + .find(|i| i.identifier() == seg_id) + .is_some() + } + + /// Cache a Kernel segment if we don't already have it + fn cache_kernel_segment(&mut self, in_seg: Segment) { + if self + .kernel_segment_cache + .iter() + .find(|i| i.identifier() == in_seg.identifier()) + .is_none() + { + self.kernel_segment_cache.push(in_seg); + } + } + + /// Apply a kernel segment at the index cache + pub fn apply_kernel_segment(&mut self, idx: usize) -> Result<(), Error> { + let segment = self.kernel_segment_cache.remove(idx); + trace!( + "pibd_desegmenter: applying kernel segment at segment idx {}", + segment.identifier().idx + ); + let mut header_pmmr = self.header_pmmr.write(); + let mut txhashset = self.txhashset.write(); + let mut batch = self.store.batch()?; + txhashset::extending( + &mut header_pmmr, + &mut txhashset, + &mut batch, + |ext, _batch| { + let extension = &mut ext.extension; + extension.apply_kernel_segment(segment)?; + Ok(()) + }, + )?; + Ok(()) + } + + /// Return an identifier for the next segment we need for the kernel pmmr + fn next_required_kernel_segment_index(&self) -> Option { + let local_kernel_mmr_size; + { + let txhashset = self.txhashset.read(); + 
local_kernel_mmr_size = txhashset.kernel_mmr_size(); + } + + let mut cur_segment_count = if local_kernel_mmr_size == 1 { + 0 + } else { + SegmentIdentifier::count_segments_required( + local_kernel_mmr_size, + self.default_kernel_segment_height, + ) + }; + + // When resuming, we need to ensure we're getting the previous segment if needed + let theoretical_pmmr_size = + SegmentIdentifier::pmmr_size(cur_segment_count, self.default_kernel_segment_height); + if local_kernel_mmr_size < theoretical_pmmr_size { + cur_segment_count -= 1; + } + + let total_segment_count = SegmentIdentifier::count_segments_required( + self.archive_header.kernel_mmr_size, + self.default_kernel_segment_height, + ); + trace!( + "Next required kernel segment is {} of {}", + cur_segment_count, + total_segment_count + ); + if cur_segment_count == total_segment_count { + None + } else { + Some(cur_segment_count as u64) + } + } + + /// Adds a Kernel segment + pub fn add_kernel_segment( + &mut self, + segment: Segment, + bitmap_root_hash: Hash, + ) -> Result<(), Error> { + if bitmap_root_hash != self.bitmap_root_hash { + return Err(Error::InvalidBitmapRoot); + } + trace!("pibd_desegmenter: add kernel segment"); + segment.validate( + self.archive_header.kernel_mmr_size, // Last MMR pos at the height being validated + None, + self.archive_header.kernel_root, // Kernel root we're checking for + )?; + self.cache_kernel_segment(segment); + Ok(()) + } +} diff --git a/chain/src/txhashset/rewindable_kernel_view.rs b/chain/src/txhashset/rewindable_kernel_view.rs index 567ba2df99..c65fe75375 100644 --- a/chain/src/txhashset/rewindable_kernel_view.rs +++ b/chain/src/txhashset/rewindable_kernel_view.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,9 +14,9 @@ //! Lightweight readonly view into kernel MMR for convenience. -use crate::core::core::pmmr::RewindablePMMR; +use crate::core::core::pmmr::{ReadablePMMR, ReadonlyPMMR, RewindablePMMR}; use crate::core::core::{BlockHeader, TxKernel}; -use crate::error::{Error, ErrorKind}; +use crate::error::Error; use grin_store::pmmr::PMMRBackend; /// Rewindable (but readonly) view of the kernel set (based on kernel MMR). @@ -40,7 +40,7 @@ impl<'a> RewindableKernelView<'a> { pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> { self.pmmr .rewind(header.kernel_mmr_size) - .map_err(|e| ErrorKind::TxHashSetErr(e))?; + .map_err(|e| Error::TxHashSetErr(e))?; // Update our header to reflect the one we rewound to. self.header = header.clone(); @@ -54,14 +54,21 @@ impl<'a> RewindableKernelView<'a> { /// fast sync where a reorg past the horizon could allow a whole rewrite of /// the kernel set. pub fn validate_root(&self) -> Result<(), Error> { - let root = self.pmmr.root().map_err(|e| ErrorKind::InvalidRoot(e))?; + let root = self + .readonly_pmmr() + .root() + .map_err(|e| Error::InvalidRoot(e))?; if root != self.header.kernel_root { - return Err(ErrorKind::InvalidTxHashSet(format!( + return Err(Error::InvalidTxHashSet(format!( "Kernel root at {} does not match", self.header.height - )) - .into()); + ))); } Ok(()) } + + /// Readonly view of our internal data. 
+ pub fn readonly_pmmr(&self) -> ReadonlyPMMR<TxKernel, PMMRBackend<TxKernel>> { + self.pmmr.as_readonly() + } } diff --git a/chain/src/txhashset/segmenter.rs b/chain/src/txhashset/segmenter.rs new file mode 100644 index 0000000000..96ed0f9bf6 --- /dev/null +++ b/chain/src/txhashset/segmenter.rs @@ -0,0 +1,136 @@ +// Copyright 2021 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Generation of the various necessary segments requested during PIBD. + +use std::{sync::Arc, time::Instant}; + +use crate::core::core::hash::Hash; +use crate::core::core::pmmr::ReadablePMMR; +use crate::core::core::{BlockHeader, OutputIdentifier, Segment, SegmentIdentifier, TxKernel}; +use crate::error::Error; +use crate::txhashset::{BitmapAccumulator, BitmapChunk, TxHashSet}; +use crate::util::secp::pedersen::RangeProof; +use crate::util::RwLock; + +/// Segmenter for generating PIBD segments. +#[derive(Clone)] +pub struct Segmenter { + txhashset: Arc<RwLock<TxHashSet>>, + bitmap_snapshot: Arc<BitmapAccumulator>, + header: BlockHeader, +} + +impl Segmenter { + /// Create a new segmenter based on the provided txhashset. + pub fn new( + txhashset: Arc<RwLock<TxHashSet>>, + bitmap_snapshot: Arc<BitmapAccumulator>, + header: BlockHeader, + ) -> Segmenter { + Segmenter { + txhashset, + bitmap_snapshot, + header, + } + } + + /// Header associated with this segmenter instance. + /// The bitmap "snapshot" corresponds to rewound state at this header. + pub fn header(&self) -> &BlockHeader { + &self.header + } + + /// Create a kernel segment. + pub fn kernel_segment(&self, id: SegmentIdentifier) -> Result<Segment<TxKernel>, Error> { + let now = Instant::now(); + let txhashset = self.txhashset.read(); + let kernel_pmmr = txhashset.kernel_pmmr_at(&self.header); + let segment = Segment::from_pmmr(id, &kernel_pmmr, false)?; + debug!( + "kernel_segment: id: ({}, {}), leaves: {}, hashes: {}, proof hashes: {}, took {}ms", + segment.id().height, + segment.id().idx, + segment.leaf_iter().count(), + segment.hash_iter().count(), + segment.proof().size(), + now.elapsed().as_millis() + ); + Ok(segment) + } + + /// The root of the bitmap snapshot PMMR. + pub fn bitmap_root(&self) -> Result<Hash, Error> { + let pmmr = self.bitmap_snapshot.readonly_pmmr(); + let root = pmmr.root().map_err(&Error::TxHashSetErr)?; + Ok(root) + } + + /// Create a utxo bitmap segment based on our bitmap "snapshot". + pub fn bitmap_segment(&self, id: SegmentIdentifier) -> Result<Segment<BitmapChunk>, Error> { + let now = Instant::now(); + let bitmap_pmmr = self.bitmap_snapshot.readonly_pmmr(); + let segment = Segment::from_pmmr(id, &bitmap_pmmr, false)?; + debug!( + "bitmap_segment: id: ({}, {}), leaves: {}, hashes: {}, proof hashes: {}, took {}ms", + segment.id().height, + segment.id().idx, + segment.leaf_iter().count(), + segment.hash_iter().count(), + segment.proof().size(), + now.elapsed().as_millis() + ); + Ok(segment) + } + + /// Create an output segment. 
+ pub fn output_segment( + &self, + id: SegmentIdentifier, + ) -> Result, Error> { + let now = Instant::now(); + let txhashset = self.txhashset.read(); + let output_pmmr = txhashset.output_pmmr_at(&self.header); + let segment = Segment::from_pmmr(id, &output_pmmr, true)?; + debug!( + "output_segment: id: ({}, {}), leaves: {}, hashes: {}, proof hashes: {}, took {}ms", + segment.id().height, + segment.id().idx, + segment.leaf_iter().count(), + segment.hash_iter().count(), + segment.proof().size(), + now.elapsed().as_millis() + ); + Ok(segment) + } + + /// Create a rangeproof segment. + pub fn rangeproof_segment(&self, id: SegmentIdentifier) -> Result, Error> { + let now = Instant::now(); + let txhashset = self.txhashset.read(); + let pmmr = txhashset.rangeproof_pmmr_at(&self.header); + let segment = Segment::from_pmmr(id, &pmmr, true)?; + debug!( + "rangeproof_segment: id: ({}, {}), leaves: {}, hashes: {}, proof hashes: {}, took {}ms", + segment.id().height, + segment.id().idx, + segment.leaf_iter().count(), + segment.hash_iter().count(), + segment.proof().size(), + now.elapsed().as_millis() + ); + Ok(segment) + } +} diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 0e0adee9d7..fa4a01ecad 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,21 +19,26 @@ use crate::core::consensus::WEEK_HEIGHT; use crate::core::core::committed::Committed; use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::merkle_proof::MerkleProof; -use crate::core::core::pmmr::{self, Backend, ReadonlyPMMR, RewindablePMMR, PMMR}; -use crate::core::core::{Block, BlockHeader, KernelFeatures, Output, OutputIdentifier, TxKernel}; +use crate::core::core::pmmr::{ + self, Backend, ReadablePMMR, ReadonlyPMMR, RewindablePMMR, VecBackend, PMMR, +}; +use crate::core::core::{ + Block, BlockHeader, KernelFeatures, Output, OutputIdentifier, Segment, TxKernel, +}; use crate::core::global; use crate::core::ser::{PMMRable, ProtocolVersion}; -use crate::error::{Error, ErrorKind}; +use crate::error::Error; use crate::linked_list::{ListIndex, PruneableListIndex, RewindableListIndex}; use crate::store::{self, Batch, ChainStore}; -use crate::txhashset::bitmap_accumulator::BitmapAccumulator; +use crate::txhashset::bitmap_accumulator::{BitmapAccumulator, BitmapChunk}; use crate::txhashset::{RewindableKernelView, UTXOView}; -use crate::types::{CommitPos, HashHeight, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus}; +use crate::types::{CommitPos, HashHeight, Tip, TxHashSetRoots, TxHashsetWriteStatus}; use crate::util::secp::pedersen::{Commitment, RangeProof}; -use crate::util::{file, secp_static, zip}; +use crate::util::{file, secp_static, zip, StopState}; +use crate::SyncState; use croaring::Bitmap; -use grin_store; use grin_store::pmmr::{clean_files_by_prefix, PMMRBackend}; +use std::cmp::Ordering; use std::fs::{self, File}; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -47,17 +52,69 @@ const KERNEL_SUBDIR: &str = "kernel"; const TXHASHSET_ZIP: &str = "txhashset_snapshot"; +/// Convenience enum to keep track of hash and leaf insertions when rebuilding an mmr +/// from segments +#[derive(Eq)] +enum OrderedHashLeafNode { + /// index of data in hashes array, pmmr position + Hash(usize, u64), + /// index of data in 
leaf_data array, pmmr position + Leaf(usize, u64), +} + +impl PartialEq for OrderedHashLeafNode { + fn eq(&self, other: &Self) -> bool { + let a_val = match self { + OrderedHashLeafNode::Hash(_, pos0) => pos0, + OrderedHashLeafNode::Leaf(_, pos0) => pos0, + }; + let b_val = match other { + OrderedHashLeafNode::Hash(_, pos0) => pos0, + OrderedHashLeafNode::Leaf(_, pos0) => pos0, + }; + a_val == b_val + } +} + +impl Ord for OrderedHashLeafNode { + fn cmp(&self, other: &Self) -> Ordering { + let a_val = match self { + OrderedHashLeafNode::Hash(_, pos0) => pos0, + OrderedHashLeafNode::Leaf(_, pos0) => pos0, + }; + let b_val = match other { + OrderedHashLeafNode::Hash(_, pos0) => pos0, + OrderedHashLeafNode::Leaf(_, pos0) => pos0, + }; + a_val.cmp(&b_val) + } +} + +impl PartialOrd for OrderedHashLeafNode { + fn partial_cmp(&self, other: &Self) -> Option { + let a_val = match self { + OrderedHashLeafNode::Hash(_, pos0) => pos0, + OrderedHashLeafNode::Leaf(_, pos0) => pos0, + }; + let b_val = match other { + OrderedHashLeafNode::Hash(_, pos0) => pos0, + OrderedHashLeafNode::Leaf(_, pos0) => pos0, + }; + Some(a_val.cmp(b_val)) + } +} + /// Convenience wrapper around a single prunable MMR backend. pub struct PMMRHandle { /// The backend storage for the MMR. pub backend: PMMRBackend, - /// The last position accessible via this MMR handle (backend may continue out beyond this). - pub last_pos: u64, + /// The MMR size accessible via this handle (backend may continue out beyond this). + pub size: u64, } impl PMMRHandle { - /// Constructor to new a PMMR handle from an existing directory structure on disk. - /// news the backend files as necessary if they do not already exist. + /// Constructor to create a PMMR handle from an existing directory structure on disk. + /// Creates the backend files as necessary if they do not already exist. pub fn new>( path: P, prunable: bool, @@ -66,8 +123,8 @@ impl PMMRHandle { ) -> Result, Error> { fs::create_dir_all(&path)?; let backend = PMMRBackend::new(&path, prunable, version, header)?; - let last_pos = backend.unpruned_size(); - Ok(PMMRHandle { backend, last_pos }) + let size = backend.unpruned_size(); + Ok(PMMRHandle { backend, size }) } } @@ -83,55 +140,83 @@ impl PMMRHandle { head.hash(), head.height ); - return Err(ErrorKind::Other("header PMMR inconsistent".to_string()).into()); + return Err(Error::Other("header PMMR inconsistent".to_string())); } - // 1-indexed pos and we want to account for subsequent parent hash pos. - // so use next header pos to find our last_pos. + // use next header pos to find our size. let next_height = head.height + 1; - let next_pos = pmmr::insertion_to_pmmr_index(next_height + 1); - let pos = next_pos.saturating_sub(1); + let size = pmmr::insertion_to_pmmr_index(next_height); debug!( "init_head: header PMMR: current head {} at pos {}", - head_hash, self.last_pos + head_hash, self.size ); debug!( "init_head: header PMMR: resetting to {} at pos {} (height {})", head.hash(), - pos, + size, head.height ); - self.last_pos = pos; + self.size = size; Ok(()) } /// Get the header hash at the specified height based on the current header MMR state. 
pub fn get_header_hash_by_height(&self, height: u64) -> Result { - let pos = pmmr::insertion_to_pmmr_index(height + 1); - let header_pmmr = ReadonlyPMMR::at(&self.backend, self.last_pos); + if height >= self.size { + return Err(Error::InvalidHeaderHeight(height)); + } + let pos = pmmr::insertion_to_pmmr_index(height); + let header_pmmr = ReadonlyPMMR::at(&self.backend, self.size); if let Some(entry) = header_pmmr.get_data(pos) { Ok(entry.hash()) } else { - Err(ErrorKind::Other(format!("not found header hash for height {}", height)).into()) + Err(Error::Other(format!( + "not found header hash for height {}", + height + ))) } } /// Get the header hash for the head of the header chain based on current MMR state. /// Find the last leaf pos based on MMR size and return its header hash. pub fn head_hash(&self) -> Result { - if self.last_pos == 0 { - return Err(ErrorKind::Other("MMR empty, no head".to_string()).into()); + if self.size == 0 { + return Err(Error::Other("MMR empty, no head".to_string())); } - let header_pmmr = ReadonlyPMMR::at(&self.backend, self.last_pos); - let leaf_pos = pmmr::bintree_rightmost(self.last_pos); + let header_pmmr = ReadonlyPMMR::at(&self.backend, self.size); + let leaf_pos = pmmr::bintree_rightmost(self.size - 1); if let Some(entry) = header_pmmr.get_data(leaf_pos) { Ok(entry.hash()) } else { - Err(ErrorKind::Other("failed to find head hash".to_string()).into()) + Err(Error::Other("failed to find head hash".to_string())) } } + + /// Get the first header with all output and kernel mmrs > provided + pub fn get_first_header_with( + &self, + output_pos: u64, + kernel_pos: u64, + from_height: u64, + store: Arc, + ) -> Option { + let mut cur_height = pmmr::round_up_to_leaf_pos(from_height); + let header_pmmr = ReadonlyPMMR::at(&self.backend, self.size); + let mut candidate: Option = None; + while let Some(header_entry) = header_pmmr.get_data(cur_height) { + if let Ok(bh) = store.get_block_header(&header_entry.hash()) { + if bh.output_mmr_size <= output_pos && bh.kernel_mmr_size <= kernel_pos { + candidate = Some(bh) + } else { + return candidate; + } + } + cur_height = pmmr::round_up_to_leaf_pos(cur_height + 1); + } + None + } } /// An easy to manipulate structure holding the 3 MMRs necessary to @@ -193,7 +278,7 @@ impl TxHashSet { version, None, )?; - if handle.last_pos == 0 { + if handle.size == 0 { debug!( "attempting to open (empty) kernel PMMR using {:?} - SUCCESS", version @@ -201,7 +286,7 @@ impl TxHashSet { maybe_kernel_handle = Some(handle); break; } - let kernel: Option = ReadonlyPMMR::at(&handle.backend, 1).get_data(1); + let kernel: Option = ReadonlyPMMR::at(&handle.backend, 1).get_data(0); if let Some(kernel) = kernel { if kernel.verify().is_ok() { debug!( @@ -232,7 +317,9 @@ impl TxHashSet { commit_index, }) } else { - Err(ErrorKind::TxHashSetErr("failed to open kernel PMMR".to_string()).into()) + Err(Error::TxHashSetErr( + "failed to open kernel PMMR".to_string(), + )) } } @@ -240,10 +327,10 @@ impl TxHashSet { fn bitmap_accumulator( pmmr_h: &PMMRHandle, ) -> Result { - let pmmr = ReadonlyPMMR::at(&pmmr_h.backend, pmmr_h.last_pos); - let size = pmmr::n_leaves(pmmr_h.last_pos); + let pmmr = ReadonlyPMMR::at(&pmmr_h.backend, pmmr_h.size); + let nbits = pmmr::n_leaves(pmmr_h.size); let mut bitmap_accumulator = BitmapAccumulator::new(); - bitmap_accumulator.init(&mut pmmr.leaf_idx_iter(0), size)?; + bitmap_accumulator.init(&mut pmmr.leaf_idx_iter(0), nbits)?; Ok(bitmap_accumulator) } @@ -262,12 +349,12 @@ impl TxHashSet { commit: Commitment, ) -> Result, 
Error> { match self.commit_index.get_output_pos_height(&commit) { - Ok(Some(pos)) => { + Ok(Some(pos1)) => { let output_pmmr: ReadonlyPMMR<'_, OutputIdentifier, _> = - ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); - if let Some(out) = output_pmmr.get_data(pos.pos) { + ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size); + if let Some(out) = output_pmmr.get_data(pos1.pos - 1) { if out.commitment() == commit { - Ok(Some((out, pos))) + Ok(Some((out, pos1))) } else { Ok(None) } @@ -276,7 +363,7 @@ } } Ok(None) => Ok(None), - Err(e) => Err(ErrorKind::StoreErr(e, "txhashset unspent check".to_string()).into()), + Err(e) => Err(Error::StoreErr(e, "txhashset unspent check".to_string())), } } @@ -285,22 +372,46 @@ /// TODO: These need to return the actual data from the flat-files instead /// of hashes now pub fn last_n_output(&self, distance: u64) -> Vec<(Hash, OutputIdentifier)> { - ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos) + ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size) .get_last_n_insertions(distance) } /// as above, for range proofs pub fn last_n_rangeproof(&self, distance: u64) -> Vec<(Hash, RangeProof)> { - ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos) + ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.size) .get_last_n_insertions(distance) } /// as above, for kernels pub fn last_n_kernel(&self, distance: u64) -> Vec<(Hash, TxKernel)> { - ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos) + ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.size) .get_last_n_insertions(distance) } + /// Efficient view into the kernel PMMR based on size in header. + pub fn kernel_pmmr_at( + &self, + header: &BlockHeader, + ) -> ReadonlyPMMR<TxKernel, PMMRBackend<TxKernel>> { + ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, header.kernel_mmr_size) + } + + /// Efficient view into the output PMMR based on size in header. + pub fn output_pmmr_at( + &self, + header: &BlockHeader, + ) -> ReadonlyPMMR<OutputIdentifier, PMMRBackend<OutputIdentifier>> { + ReadonlyPMMR::at(&self.output_pmmr_h.backend, header.output_mmr_size) + } + + /// Efficient view into the rangeproof PMMR based on size in header. + pub fn rangeproof_pmmr_at( + &self, + header: &BlockHeader, + ) -> ReadonlyPMMR<RangeProof, PMMRBackend<RangeProof>> { + ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, header.output_mmr_size) + } + /// Convenience function to query the db for a header by its hash. pub fn get_block_header(&self, hash: &Hash) -> Result<BlockHeader, Error> { Ok(self.commit_index.get_block_header(&hash)?) 
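The `*_pmmr_at` views above are what make the segmenter cheap: rather than rewinding a backend, the PMMR is simply re-opened at the MMR size recorded in the target header, so only positions that existed as of that header are visible. A minimal sketch of the intended usage, assuming a `txhashset: &TxHashSet` and `header: &BlockHeader` are in scope (0-based positions, per this diff):

// Read the last kernel as of `header`, with no rewind and no mutation.
let kernels = txhashset.kernel_pmmr_at(header);
let last_leaf_pos0 = pmmr::bintree_rightmost(header.kernel_mmr_size - 1);
let kernel: Option<TxKernel> = kernels.get_data(last_leaf_pos0);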
@@ -315,15 +426,10 @@ impl TxHashSet { max_count: u64, max_index: Option, ) -> (u64, Vec) { - ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos) + ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size) .elements_from_pmmr_index(start_index, max_count, max_index) } - /// highest output insertion index available - pub fn highest_output_insertion_index(&self) -> u64 { - self.output_pmmr_h.last_pos - } - /// As above, for rangeproofs pub fn rangeproofs_by_pmmr_index( &self, @@ -331,11 +437,28 @@ impl TxHashSet { max_count: u64, max_index: Option, ) -> (u64, Vec) { - ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos) + ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.size) .elements_from_pmmr_index(start_index, max_count, max_index) } + /// size of output mmr + pub fn output_mmr_size(&self) -> u64 { + self.output_pmmr_h.size + } + + /// size of kernel mmr + pub fn kernel_mmr_size(&self) -> u64 { + self.kernel_pmmr_h.size + } + + /// size of rangeproof mmr (can differ from output mmr size during PIBD sync) + pub fn rangeproof_mmr_size(&self) -> u64 { + self.rproof_pmmr_h.size + } + /// Find a kernel with a given excess. Work backwards from `max_index` to `min_index` + /// NOTE: this linear search over all kernel history can be VERY expensive + /// public API access to this method should be limited pub fn find_kernel( &self, excess: &Commitment, @@ -343,13 +466,13 @@ impl TxHashSet { max_index: Option, ) -> Option<(TxKernel, u64)> { let min_index = min_index.unwrap_or(1); - let max_index = max_index.unwrap_or(self.kernel_pmmr_h.last_pos); + let max_index = max_index.unwrap_or(self.kernel_pmmr_h.size); - let pmmr = ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos); + let pmmr = ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.size); let mut index = max_index + 1; while index > min_index { index -= 1; - if let Some(kernel) = pmmr.get_data(index) { + if let Some(kernel) = pmmr.get_data(index - 1) { if &kernel.excess == excess { return Some((kernel, index)); } @@ -359,22 +482,16 @@ impl TxHashSet { } /// Get MMR roots. - pub fn roots(&self) -> TxHashSetRoots { - let output_pmmr = - ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); - let rproof_pmmr = - ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos); - let kernel_pmmr = - ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos); + pub fn roots(&self) -> Result { + let output_pmmr = ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size); + let rproof_pmmr = ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.size); + let kernel_pmmr = ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.size); - TxHashSetRoots { - output_roots: OutputRoots { - pmmr_root: output_pmmr.root(), - bitmap_root: self.bitmap_accumulator.root(), - }, - rproof_root: rproof_pmmr.root(), - kernel_root: kernel_pmmr.root(), - } + Ok(TxHashSetRoots { + output_root: output_pmmr.root().map_err(|e| Error::InvalidRoot(e))?, + rproof_root: rproof_pmmr.root().map_err(|e| Error::InvalidRoot(e))?, + kernel_root: kernel_pmmr.root().map_err(|e| Error::InvalidRoot(e))?, + }) } /// Return Commit's MMR position @@ -382,14 +499,12 @@ impl TxHashSet { Ok(self.commit_index.get_output_pos(&commit)?) } - /// build a new merkle proof for the given position. 
+ /// build a new merkle proof for the given output commitment + pub fn merkle_proof(&mut self, commit: Commitment) -> Result<MerkleProof, Error> { - let pos = self.commit_index.get_output_pos(&commit)?; - PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos) - .merkle_proof(pos) - .map_err(|e| { - ErrorKind::MerkleProof(format!("Commit {:?}, pos {}, {}", commit, pos, e)).into() - }) + let pos0 = self.commit_index.get_output_pos(&commit)?; + PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.size) + .merkle_proof(pos0) + .map_err(|e| Error::MerkleProof(format!("Commit {:?}, pos {}, {}", commit, pos0, e))) + } /// Compact the MMR data files and flush the rm logs @@ -429,7 +544,7 @@ impl TxHashSet { let cutoff = head.height.saturating_sub(WEEK_HEIGHT * 2); let cutoff_hash = header_pmmr.get_header_hash_by_height(cutoff)?; let cutoff_header = batch.get_block_header(&cutoff_hash)?; - self.verify_kernel_pos_index(&cutoff_header, header_pmmr, batch) + self.verify_kernel_pos_index(&cutoff_header, header_pmmr, batch, None, None) } /// Verify and (re)build the NRD kernel_pos index from the provided header onwards. @@ -438,6 +553,8 @@ impl TxHashSet { from_header: &BlockHeader, header_pmmr: &PMMRHandle<BlockHeader>, batch: &Batch<'_>, + status: Option<Arc<SyncState>>, + stop_state: Option<Arc<StopState>>, ) -> Result<(), Error> { if !global::is_nrd_enabled() { return Ok(()); @@ -461,15 +578,16 @@ impl TxHashSet { prev_size, ); - let kernel_pmmr = - ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos); + let kernel_pmmr = ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.size); let mut current_pos = prev_size + 1; let mut current_header = from_header.clone(); let mut count = 0; - while current_pos <= self.kernel_pmmr_h.last_pos { - if pmmr::is_leaf(current_pos) { - if let Some(kernel) = kernel_pmmr.get_data(current_pos) { + let total = pmmr::n_leaves(self.kernel_pmmr_h.size); + let mut applied = 0; + while current_pos <= self.kernel_pmmr_h.size { + if pmmr::is_leaf(current_pos - 1) { + if let Some(kernel) = kernel_pmmr.get_data(current_pos - 1) { match kernel.features { KernelFeatures::NoRecentDuplicate { .. } => { while current_pos > current_header.kernel_mmr_size { @@ -487,7 +605,19 @@ impl TxHashSet { _ => {} } } + applied += 1; + if let Some(ref s) = status { + if applied % 10000 == 0 { + s.on_setup(None, None, Some(applied), Some(total)); + } + } + } + if let Some(ref s) = stop_state { + if s.is_stopped() { + return Ok(()); + } } + current_pos += 1; } @@ -509,18 +639,19 @@ impl TxHashSet { ) -> Result<(), Error> { let now = Instant::now(); - let output_pmmr = - ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); + let output_pmmr = ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size); // Iterate over the current output_pos index, removing any entries that // do not point to the expected output. let mut removed_count = 0; - for (key, (pos, _)) in batch.output_pos_iter()? { - if let Some(out) = output_pmmr.get_data(pos) { - if let Ok(pos_via_mmr) = batch.get_output_pos(&out.commitment()) { + for (key, pos1) in batch.output_pos_iter()? { + let pos0 = pos1.pos - 1; + if let Some(out) = output_pmmr.get_data(pos0) { + if let Ok(pos0_via_mmr) = batch.get_output_pos(&out.commitment()) { // If the pos matches and the index key matches the commitment // then keep the entry, otherwise we want to clean it up. 
- if pos == pos_via_mmr && batch.is_match_output_pos_key(&key, &out.commitment()) + if pos0 == pos0_via_mmr + && batch.is_match_output_pos_key(&key, &out.commitment()) { continue; } @@ -535,9 +666,9 @@ impl TxHashSet { ); let mut outputs_pos: Vec<(Commitment, u64)> = vec![]; - for pos in output_pmmr.leaf_pos_iter() { - if let Some(out) = output_pmmr.get_data(pos) { - outputs_pos.push((out.commit, pos)); + for pos0 in output_pmmr.leaf_pos_iter() { + if let Some(out) = output_pmmr.get_data(pos0) { + outputs_pos.push((out.commit, 1 + pos0)); } } @@ -567,15 +698,14 @@ impl TxHashSet { let hash = header_pmmr.get_header_hash_by_height(search_height + 1)?; let h = batch.get_block_header(&hash)?; while i < total_outputs { - let (commit, pos) = outputs_pos[i]; - if pos > h.output_mmr_size { - // Note: MMR position is 1-based and not 0-based, so here must be '>' instead of '>=' + let (commit, pos1) = outputs_pos[i]; + if pos1 > h.output_mmr_size { break; } batch.save_output_pos_height( &commit, CommitPos { - pos, + pos: pos1, height: h.height, }, )?; @@ -614,7 +744,7 @@ where let header_head = batch.header_head()?; let res = { - let header_pmmr = PMMR::at(&mut handle.backend, handle.last_pos); + let header_pmmr = PMMR::at(&mut handle.backend, handle.size); let mut header_extension = HeaderExtension::new(header_pmmr, header_head); let mut extension = Extension::new(trees, head); let mut extension_pair = ExtensionPair { @@ -649,13 +779,11 @@ where { let res: Result; { - let header_pmmr = ReadonlyPMMR::at(&handle.backend, handle.last_pos); - let output_pmmr = - ReadonlyPMMR::at(&trees.output_pmmr_h.backend, trees.output_pmmr_h.last_pos); - let rproof_pmmr = - ReadonlyPMMR::at(&trees.rproof_pmmr_h.backend, trees.rproof_pmmr_h.last_pos); + let header_pmmr = ReadonlyPMMR::at(&handle.backend, handle.size); + let output_pmmr = ReadonlyPMMR::at(&trees.output_pmmr_h.backend, trees.output_pmmr_h.size); + let rproof_pmmr = ReadonlyPMMR::at(&trees.rproof_pmmr_h.backend, trees.rproof_pmmr_h.size); - // new a new batch here to pass into the utxo_view. + // Create a new batch here to pass into the utxo_view. // Discard it (rollback) after we finish with the utxo_view. let batch = trees.commit_index.batch()?; let utxo = UTXOView::new(header_pmmr, output_pmmr, rproof_pmmr); @@ -666,8 +794,8 @@ where /// Rewindable (but still readonly) view on the kernel MMR. /// The underlying backend is readonly. But we permit the PMMR to be "rewound" -/// via last_pos. -/// We new a new db batch for this view and discard it (rollback) +/// via size. +/// We create a new db batch for this view and discard it (rollback) /// when we are done with the view. pub fn rewindable_kernel_view(trees: &TxHashSet, inner: F) -> Result where @@ -676,9 +804,9 @@ where let res: Result; { let kernel_pmmr = - RewindablePMMR::at(&trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.last_pos); + RewindablePMMR::at(&trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.size); - // new a new batch here to pass into the kernel_view. + // Create a new batch here to pass into the kernel_view. // Discard it (rollback) after we finish with the kernel_view. 
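+ // In sketch form (assumed semantics): a Batch dropped without commit() is + // rolled back, which is what lets this view borrow one purely for reads: + // let batch = trees.commit_index.batch()?; // opened + // let res = inner(&mut view, &batch); // used readonly + // // dropped here without commit() => rolled back, nothing persisted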
let batch = trees.commit_index.batch()?; let header = batch.head_header()?; @@ -712,13 +840,13 @@ where let head = batch.head()?; let header_head = batch.header_head()?; - // new a child transaction so if the state is rolled back by itself, all + // create a child transaction so if the state is rolled back by itself, all // index saving can be undone let child_batch = batch.child()?; { trace!("Starting new txhashset extension."); - let header_pmmr = PMMR::at(&mut header_pmmr.backend, header_pmmr.last_pos); + let header_pmmr = PMMR::at(&mut header_pmmr.backend, header_pmmr.size); let mut header_extension = HeaderExtension::new(header_pmmr, header_head); let mut extension = Extension::new(trees, head); let mut extension_pair = ExtensionPair { @@ -756,9 +884,9 @@ where trees.output_pmmr_h.backend.sync()?; trees.rproof_pmmr_h.backend.sync()?; trees.kernel_pmmr_h.backend.sync()?; - trees.output_pmmr_h.last_pos = sizes.0; - trees.rproof_pmmr_h.last_pos = sizes.1; - trees.kernel_pmmr_h.last_pos = sizes.2; + trees.output_pmmr_h.size = sizes.0; + trees.rproof_pmmr_h.size = sizes.1; + trees.kernel_pmmr_h.size = sizes.2; // Update our bitmap_accumulator based on our extension trees.bitmap_accumulator = bitmap_accumulator; @@ -783,8 +911,6 @@ where { let batch = store.batch()?; - // Note: Extending either the sync_head or header_head MMR here. - // Use underlying MMR to determine the "head". let head = match handle.head_hash() { Ok(hash) => { let header = batch.get_block_header(&hash)?; @@ -793,7 +919,7 @@ where Err(_) => Tip::default(), }; - let pmmr = PMMR::at(&mut handle.backend, handle.last_pos); + let pmmr = PMMR::at(&mut handle.backend, handle.size); let mut extension = HeaderExtension::new(pmmr, head); let res = inner(&mut extension, &batch); @@ -817,12 +943,10 @@ where let res: Result; let rollback: bool; - // new a child transaction so if the state is rolled back by itself, all + // create a child transaction so if the state is rolled back by itself, all // index saving can be undone let child_batch = batch.child()?; - // Note: Extending either the sync_head or header_head MMR here. - // Use underlying MMR to determine the "head". let head = match handle.head_hash() { Ok(hash) => { if let Ok(header) = child_batch.get_block_header(&hash) { @@ -835,7 +959,7 @@ where }; { - let pmmr = PMMR::at(&mut handle.backend, handle.last_pos); + let pmmr = PMMR::at(&mut handle.backend, handle.size); let mut extension = HeaderExtension::new(pmmr, head); res = inner(&mut extension, &child_batch); @@ -854,7 +978,7 @@ where } else { child_batch.commit()?; handle.backend.sync()?; - handle.last_pos = size; + handle.size = size; } Ok(r) } @@ -885,8 +1009,8 @@ impl<'a> HeaderExtension<'a> { } /// Get the header hash for the specified pos from the underlying MMR backend. - fn get_header_hash(&self, pos: u64) -> Option { - self.pmmr.get_data(pos).map(|x| x.hash()) + fn get_header_hash(&self, pos0: u64) -> Option { + self.pmmr.get_data(pos0).map(|x| x.hash()) } /// The head representing the furthest extent of the current extension. @@ -894,6 +1018,13 @@ impl<'a> HeaderExtension<'a> { self.head.clone() } + /// Get header hash by height. + /// Based on current header MMR. + pub fn get_header_hash_by_height(&self, height: u64) -> Option { + let pos = pmmr::insertion_to_pmmr_index(height); + self.get_header_hash(pos) + } + /// Get the header at the specified height based on the current state of the header extension. /// Derives the MMR pos from the height (insertion index) and retrieves the header hash. 
/// Looks the header up in the db by hash. @@ -902,32 +1033,29 @@ impl<'a> HeaderExtension<'a> { height: u64, batch: &Batch<'_>, ) -> Result { - let pos = pmmr::insertion_to_pmmr_index(height + 1); - if let Some(hash) = self.get_header_hash(pos) { + if let Some(hash) = self.get_header_hash_by_height(height) { Ok(batch.get_block_header(&hash)?) } else { - Err(ErrorKind::Other(format!("not found header for height {}", height)).into()) + Err(Error::Other(format!( + "not found header for height {}", + height + ))) } } /// Compares the provided header to the header in the header MMR at that height. /// If these match we know the header is on the current chain. - pub fn is_on_current_chain( + pub fn is_on_current_chain>( &self, - header: &BlockHeader, + t: T, batch: &Batch<'_>, - ) -> Result<(), Error> { - if header.height > self.head.height { - return Err( - ErrorKind::Other(format!("header is not on current chain, out beyond")).into(), - ); - } - let chain_header = self.get_header_by_height(header.height, batch)?; - if chain_header.hash() == header.hash() { - Ok(()) - } else { - Err(ErrorKind::Other("header is not on current chain".to_string()).into()) + ) -> Result { + let t = t.into(); + if t.height > self.head.height { + return Ok(false); } + let chain_header = self.get_header_by_height(t.height, batch)?; + Ok(chain_header.hash() == t.hash()) } /// Force the rollback of this extension, no matter the result. @@ -940,7 +1068,7 @@ impl<'a> HeaderExtension<'a> { /// extension. pub fn apply_header(&mut self, header: &BlockHeader) -> Result<(), Error> { self.pmmr.push(header).map_err(|e| { - ErrorKind::TxHashSetErr(format!( + Error::TxHashSetErr(format!( "Unable to apply header with height {}, {}", header.height, e )) @@ -960,9 +1088,9 @@ impl<'a> HeaderExtension<'a> { self.head.height, ); - let header_pos = pmmr::insertion_to_pmmr_index(header.height + 1); + let header_pos = 1 + pmmr::insertion_to_pmmr_index(header.height); self.pmmr.rewind(header_pos, &Bitmap::new()).map_err(|e| { - ErrorKind::TxHashSetErr(format!("pmmr rewind for pos {}, {}", header_pos, e)) + Error::TxHashSetErr(format!("pmmr rewind for pos {}, {}", header_pos, e)) })?; // Update our head to reflect the header we rewound to. @@ -978,7 +1106,7 @@ impl<'a> HeaderExtension<'a> { /// The root of the header MMR for convenience. pub fn root(&self) -> Result { - Ok(self.pmmr.root().map_err(|e| ErrorKind::InvalidRoot(e))?) + Ok(self.pmmr.root().map_err(|e| Error::InvalidRoot(e))?) } /// Validate the prev_root of the header against the root of the current header MMR. @@ -990,11 +1118,10 @@ impl<'a> HeaderExtension<'a> { } let root = self.root()?; if root != header.prev_root { - Err(ErrorKind::InvalidRoot(format!( + Err(Error::InvalidRoot(format!( "Unable to validate root, Expected header.prev_root {}, get {}", header.prev_root, root - )) - .into()) + ))) } else { Ok(()) } @@ -1021,6 +1148,7 @@ pub struct Extension<'a> { kernel_pmmr: PMMR<'a, TxKernel, PMMRBackend>, bitmap_accumulator: BitmapAccumulator, + bitmap_cache: Bitmap, /// Rollback flag. 
rollback: bool, @@ -1033,8 +1161,8 @@ impl<'a> Committed for Extension<'a> { fn outputs_committed(&self) -> Vec { let mut commitments = vec![]; - for pos in self.output_pmmr.leaf_pos_iter() { - if let Some(out) = self.output_pmmr.get_data(pos) { + for pos0 in self.output_pmmr.leaf_pos_iter() { + if let Some(out) = self.output_pmmr.get_data(pos0) { commitments.push(out.commit); } } @@ -1043,7 +1171,7 @@ impl<'a> Committed for Extension<'a> { fn kernels_committed(&self) -> Vec { let mut commitments = vec![]; - for n in 1..self.kernel_pmmr.unpruned_size() + 1 { + for n in 0..self.kernel_pmmr.unpruned_size() { if pmmr::is_leaf(n) { if let Some(kernel) = self.kernel_pmmr.get_data(n) { commitments.push(kernel.excess()); @@ -1058,19 +1186,14 @@ impl<'a> Extension<'a> { fn new(trees: &'a mut TxHashSet, head: Tip) -> Extension<'a> { Extension { head, - output_pmmr: PMMR::at( - &mut trees.output_pmmr_h.backend, - trees.output_pmmr_h.last_pos, - ), - rproof_pmmr: PMMR::at( - &mut trees.rproof_pmmr_h.backend, - trees.rproof_pmmr_h.last_pos, - ), - kernel_pmmr: PMMR::at( - &mut trees.kernel_pmmr_h.backend, - trees.kernel_pmmr_h.last_pos, - ), + output_pmmr: PMMR::at(&mut trees.output_pmmr_h.backend, trees.output_pmmr_h.size), + rproof_pmmr: PMMR::at(&mut trees.rproof_pmmr_h.backend, trees.rproof_pmmr_h.size), + kernel_pmmr: PMMR::at(&mut trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.size), bitmap_accumulator: trees.bitmap_accumulator.clone(), + bitmap_cache: trees + .bitmap_accumulator + .as_bitmap() + .unwrap_or(Bitmap::new()), rollback: false, } } @@ -1085,11 +1208,39 @@ impl<'a> Extension<'a> { pub fn utxo_view(&'a self, header_ext: &'a HeaderExtension<'a>) -> UTXOView<'a> { UTXOView::new( header_ext.pmmr.readonly_pmmr(), - self.output_pmmr.readonly_pmmr(), - self.rproof_pmmr.readonly_pmmr(), + self.output_readonly_pmmr(), + self.rproof_readonly_pmmr(), ) } + /// Readonly view of our output data. + pub fn output_readonly_pmmr( + &self, + ) -> ReadonlyPMMR> { + self.output_pmmr.readonly_pmmr() + } + + /// Take a snapshot of our bitmap accumulator + pub fn bitmap_accumulator(&self) -> BitmapAccumulator { + self.bitmap_accumulator.clone() + } + + /// Readonly view of our bitmap accumulator data. + pub fn bitmap_readonly_pmmr(&self) -> ReadonlyPMMR> { + self.bitmap_accumulator.readonly_pmmr() + } + + /// Readonly view of our rangeproof data. + pub fn rproof_readonly_pmmr(&self) -> ReadonlyPMMR> { + self.rproof_pmmr.readonly_pmmr() + } + + /// Reset prune lists + pub fn reset_prune_lists(&mut self) { + self.output_pmmr.reset_prune_list(); + self.rproof_pmmr.reset_prune_list(); + } + /// Apply a new block to the current txhashet extension (output, rangeproof, kernel MMRs). /// Returns a vec of commit_pos representing the pos and height of the outputs spent /// by this block. @@ -1144,7 +1295,7 @@ impl<'a> Extension<'a> { // Note: This validates and NRD relative height locks via the "recent" kernel index. self.apply_kernels(b.kernels(), b.header.height, batch)?; - // Update our BitmapAccumulator based on affected outputs (both spent and newd). + // Update our BitmapAccumulator based on affected outputs (both spent and created). self.apply_to_bitmap_accumulator(&affected_pos)?; // Update the head of the extension to reflect the block we just applied. 
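// (Illustrative note on the conversion used below: affected_pos holds 1-based MMR
// positions, which apply_to_bitmap_accumulator maps to 0-based leaf indices via
// pmmr::n_leaves(pos) - 1, e.g. positions 1, 2, 4 -> leaf indices 0, 1, 2.)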
@@ -1154,13 +1305,14 @@ impl<'a> Extension<'a> { } fn apply_to_bitmap_accumulator(&mut self, output_pos: &[u64]) -> Result<(), Error> { + // NOTE: 1-based output_pos shouldn't have 0 in it (but does) let mut output_idx: Vec<_> = output_pos .iter() .map(|x| pmmr::n_leaves(*x).saturating_sub(1)) .collect(); output_idx.sort_unstable(); let min_idx = output_idx.first().cloned().unwrap_or(0); - let size = pmmr::n_leaves(self.output_pmmr.last_pos); + let size = pmmr::n_leaves(self.output_pmmr.size); self.bitmap_accumulator.apply( output_idx, self.output_pmmr @@ -1169,28 +1321,34 @@ impl<'a> Extension<'a> { ) } + /// Sets the bitmap accumulator (as received during PIBD sync) + pub fn set_bitmap_accumulator(&mut self, accumulator: BitmapAccumulator) { + self.bitmap_accumulator = accumulator; + self.bitmap_cache = self.bitmap_accumulator.as_bitmap().unwrap_or(Bitmap::new()); + } + // Prune output and rangeproof PMMRs based on provided pos. // Input is not valid if we cannot prune successfully. fn apply_input(&mut self, commit: Commitment, pos: CommitPos) -> Result<(), Error> { - match self.output_pmmr.prune(pos.pos) { + match self.output_pmmr.prune(pos.pos - 1) { Ok(true) => { self.rproof_pmmr - .prune(pos.pos) - .map_err(|e| ErrorKind::TxHashSetErr(format!("pmmr prune error, {}", e)))?; + .prune(pos.pos - 1) + .map_err(|e| Error::TxHashSetErr(format!("pmmr prune error, {}", e)))?; Ok(()) } - Ok(false) => Err(ErrorKind::AlreadySpent(commit).into()), - Err(e) => Err(ErrorKind::TxHashSetErr(e).into()), + Ok(false) => Err(Error::AlreadySpent(commit)), + Err(e) => Err(Error::TxHashSetErr(e)), } } fn apply_output(&mut self, out: &Output, batch: &Batch<'_>) -> Result { let commit = out.commitment(); - if let Ok(pos) = batch.get_output_pos(&commit) { - if let Some(out_mmr) = self.output_pmmr.get_data(pos) { + if let Ok(pos0) = batch.get_output_pos(&commit) { + if let Some(out_mmr) = self.output_pmmr.get_data(pos0) { if out_mmr.commitment() == commit { - return Err(ErrorKind::DuplicateCommitment(commit).into()); + return Err(Error::DuplicateCommitment(commit)); } } } @@ -1198,30 +1356,160 @@ impl<'a> Extension<'a> { let output_pos = self .output_pmmr .push(&out.identifier()) - .map_err(|e| ErrorKind::TxHashSetErr(format!("pmmr output push error, {}", e)))?; + .map_err(|e| Error::TxHashSetErr(format!("pmmr output push error, {}", e)))?; // push the rangeproof to the MMR. let rproof_pos = self .rproof_pmmr .push(&out.proof()) - .map_err(|e| ErrorKind::TxHashSetErr(format!("pmmr proof push error, {}", e)))?; + .map_err(|e| Error::TxHashSetErr(format!("pmmr proof push error, {}", e)))?; // The output and rproof MMRs should be exactly the same size // and we should have inserted to both in exactly the same pos. 
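// (The PMMRs are 0-based internally but callers still work with 1-based positions,
// hence the `Ok(1 + output_pos)` below.)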
{
if self.output_pmmr.unpruned_size() != self.rproof_pmmr.unpruned_size() {
- return Err(
- ErrorKind::Other("output vs rproof MMRs different sizes".to_string()).into(),
- );
+ return Err(Error::Other(
+ "output vs rproof MMRs different sizes".to_string(),
+ ));
}

if output_pos != rproof_pos {
- return Err(
- ErrorKind::Other("output vs rproof MMRs different pos".to_string()).into(),
- );
+ return Err(Error::Other(
+ "output vs rproof MMRs different pos".to_string(),
+ ));
+ }
+ }
+ Ok(1 + output_pos)
+ }
+
+ /// Once the PIBD set is downloaded, we need to ensure that the respective leaf sets
+ /// match the bitmap (particularly in the case of outputs being spent after a PIBD catch-up)
+ pub fn update_leaf_sets(&mut self, bitmap: &Bitmap) -> Result<(), Error> {
+ let flipped = bitmap.flip(0u32..bitmap.maximum().unwrap() + 1);
+ for spent_pmmr_index in flipped.iter() {
+ let pos0 = pmmr::insertion_to_pmmr_index(spent_pmmr_index.into());
+ self.output_pmmr.remove_from_leaf_set(pos0);
+ self.rproof_pmmr.remove_from_leaf_set(pos0);
+ }
+ Ok(())
+ }
+
+ /// Merge and sort the given hash and leaf positions, returning an array
+ /// of elements that can be applied in order to a pmmr
+ fn sort_pmmr_hashes_and_leaves(
+ &mut self,
+ hash_pos: Vec<u64>,
+ leaf_pos: Vec<u64>,
+ skip_leaf_position: Option<u64>,
+ ) -> Vec<OrderedHashLeafNode> {
+ // Merge hash_pos and leaf_pos into a single array and sort it into insertion order
+ let mut ordered_inserts = vec![];
+ for (data_index, pos0) in leaf_pos.iter().enumerate() {
+ // Don't re-push the genesis output
+ if skip_leaf_position == Some(*pos0) {
+ continue;
+ }
+ ordered_inserts.push(OrderedHashLeafNode::Leaf(data_index, *pos0));
+ }
+ for (data_index, pos0) in hash_pos.iter().enumerate() {
+ ordered_inserts.push(OrderedHashLeafNode::Hash(data_index, *pos0));
+ }
+ ordered_inserts.sort();
+ ordered_inserts
+ }
+
+ /// Apply an output segment to the output PMMR. Must be called in order.
+ /// Sort and apply hashes and leaves within a segment to the output pmmr, skipping over
+ /// the genesis position.
+ /// NB: Would like to make this more generic but the hard casting of pmmrs
+ /// held by this struct makes it awkward to do so.
+ pub fn apply_output_segment(
+ &mut self,
+ segment: Segment<OutputIdentifier>,
+ ) -> Result<(), Error> {
+ let (_sid, hash_pos, hashes, leaf_pos, leaf_data, _proof) = segment.parts();
+
+ // insert either leaves or pruned subtrees as we go
+ for insert in self.sort_pmmr_hashes_and_leaves(hash_pos, leaf_pos, Some(0)) {
+ match insert {
+ OrderedHashLeafNode::Hash(idx, pos0) => {
+ if pos0 >= self.output_pmmr.size {
+ if self.output_pmmr.size == 1 {
+ // All initial outputs are spent up to this hash,
+ // so roll back the genesis output
+ self.output_pmmr
+ .rewind(0, &Bitmap::new())
+ .map_err(&Error::TxHashSetErr)?;
+ }
+ self.output_pmmr
+ .push_pruned_subtree(hashes[idx], pos0)
+ .map_err(&Error::TxHashSetErr)?;
+ }
+ }
+ OrderedHashLeafNode::Leaf(idx, pos0) => {
+ if pos0 == self.output_pmmr.size {
+ self.output_pmmr
+ .push(&leaf_data[idx])
+ .map_err(&Error::TxHashSetErr)?;
+ }
+ let pmmr_index = pmmr::pmmr_leaf_to_insertion_index(pos0);
+ match pmmr_index {
+ Some(i) => {
+ if !self.bitmap_cache.contains(i as u32) {
+ self.output_pmmr.remove_from_leaf_set(pos0);
+ }
+ }
+ None => {}
+ };
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// Apply a rangeproof segment to the rangeproof PMMR. Must be called in order.
+ /// Sort and apply hashes and leaves within a segment to the rangeproof pmmr, skipping over
+ /// the genesis position.
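+ /// (As with output segments above, hashes and leaves are applied strictly in
+ /// ascending pmmr position order.)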
+ pub fn apply_rangeproof_segment(&mut self, segment: Segment<RangeProof>) -> Result<(), Error> {
+ let (_sid, hash_pos, hashes, leaf_pos, leaf_data, _proof) = segment.parts();
+
+ // insert either leaves or pruned subtrees as we go
+ for insert in self.sort_pmmr_hashes_and_leaves(hash_pos, leaf_pos, Some(0)) {
+ match insert {
+ OrderedHashLeafNode::Hash(idx, pos0) => {
+ if pos0 >= self.rproof_pmmr.size {
+ if self.rproof_pmmr.size == 1 {
+ // All initial outputs are spent up to this hash,
+ // so roll back the genesis output
+ self.rproof_pmmr
+ .rewind(0, &Bitmap::new())
+ .map_err(&Error::TxHashSetErr)?;
+ }
+ self.rproof_pmmr
+ .push_pruned_subtree(hashes[idx], pos0)
+ .map_err(&Error::TxHashSetErr)?;
+ }
+ }
+ OrderedHashLeafNode::Leaf(idx, pos0) => {
+ if pos0 == self.rproof_pmmr.size {
+ self.rproof_pmmr
+ .push(&leaf_data[idx])
+ .map_err(&Error::TxHashSetErr)?;
+ }
+ let pmmr_index = pmmr::pmmr_leaf_to_insertion_index(pos0);
+ match pmmr_index {
+ Some(i) => {
+ if !self.bitmap_cache.contains(i as u32) {
+ self.rproof_pmmr.remove_from_leaf_set(pos0);
+ }
+ }
+ None => {}
+ };
+ }
+ }
}
}
- Ok(output_pos)
+ Ok(())
}

/// Apply kernels to the kernel MMR.
@@ -1243,13 +1531,37 @@
Ok(())
}

+ /// Apply a kernel segment to the kernel PMMR. Must be called in order.
+ pub fn apply_kernel_segment(&mut self, segment: Segment<TxKernel>) -> Result<(), Error> {
+ let (_sid, _hash_pos, _hashes, leaf_pos, leaf_data, _proof) = segment.parts();
+ // Non-prunable - insert only leaves (with the genesis kernel removed)
+ for insert in self.sort_pmmr_hashes_and_leaves(vec![], leaf_pos, Some(0)) {
+ match insert {
+ OrderedHashLeafNode::Hash(_, _) => {
+ return Err(Error::InvalidSegment(
+ "Kernel PMMR is non-prunable, should not have hash data".to_string(),
+ )
+ .into());
+ }
+ OrderedHashLeafNode::Leaf(idx, pos0) => {
+ if pos0 == self.kernel_pmmr.size {
+ self.kernel_pmmr
+ .push(&leaf_data[idx])
+ .map_err(&Error::TxHashSetErr)?;
+ }
+ }
+ }
+ }
+ Ok(())
+ }
+
/// Push kernel onto MMR (hash and data files).
fn apply_kernel(&mut self, kernel: &TxKernel) -> Result { let pos = self .kernel_pmmr .push(kernel) - .map_err(|e| ErrorKind::TxHashSetErr(format!("pmmr push kernel error, {}", e)))?; - Ok(pos) + .map_err(|e| Error::TxHashSetErr(format!("pmmr push kernel error, {}", e)))?; + Ok(1 + pos) } /// Build a Merkle proof for the given output and the block @@ -1265,9 +1577,9 @@ impl<'a> Extension<'a> { let out_id = out_id.as_ref(); debug!("txhashset: merkle_proof: output: {:?}", out_id.commit); // then calculate the Merkle Proof based on the known pos - let pos = batch.get_output_pos(&out_id.commit)?; - let merkle_proof = self.output_pmmr.merkle_proof(pos).map_err(|e| { - ErrorKind::TxHashSetErr(format!("pmmr get merkle proof at pos {}, {}", pos, e)) + let pos0 = batch.get_output_pos(&out_id.commit)?; + let merkle_proof = self.output_pmmr.merkle_proof(pos0).map_err(|e| { + Error::TxHashSetErr(format!("pmmr get merkle proof at pos {}, {}", pos0, e)) })?; Ok(merkle_proof) @@ -1282,15 +1594,16 @@ impl<'a> Extension<'a> { let header = batch.get_block_header(&self.head.last_block_h)?; self.output_pmmr .snapshot(&header) - .map_err(|e| ErrorKind::Other(format!("pmmr snapshot error, {}", e)))?; + .map_err(|e| Error::Other(format!("pmmr snapshot error, {}", e)))?; self.rproof_pmmr .snapshot(&header) - .map_err(|e| ErrorKind::Other(format!("pmmr snapshot error, {}", e)))?; + .map_err(|e| Error::Other(format!("pmmr snapshot error, {}", e)))?; Ok(()) } /// Rewinds the MMRs to the provided block, rewinding to the last output pos - /// and last kernel pos of that block. + /// and last kernel pos of that block. If `updated_bitmap` is supplied, the + /// bitmap accumulator will be replaced with its contents pub fn rewind(&mut self, header: &BlockHeader, batch: &Batch<'_>) -> Result<(), Error> { debug!( "Rewind extension to {} at {} from {} at {}", @@ -1362,11 +1675,11 @@ impl<'a> Extension<'a> { // Update our BitmapAccumulator based on affected outputs. // We want to "unspend" every rewound spent output. - // Treat last_pos as an affected output to ensure we rebuild far enough back. + // Treat size as an affected output to ensure we rebuild far enough back. let mut affected_pos = spent_pos; - affected_pos.push(self.output_pmmr.last_pos); + affected_pos.push(self.output_pmmr.size); - // Remove any entries from the output_pos newd by the block being rewound. + // Remove any entries from the output_pos created by the block being rewound. let mut missing_count = 0; for out in block.outputs() { if batch.delete_output_pos_height(&out.commitment()).is_err() { @@ -1398,9 +1711,9 @@ impl<'a> Extension<'a> { // reused output commitment. For example an output at pos 1, spent, reused at pos 2. // The output_pos index should be updated to reflect the old pos 1 when unspent. 
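// (CommitPos positions stay 1-based in the index; subtract one when reading the
// 0-based PMMR, as `get_data(pos1.pos - 1)` does below.)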
if let Ok(spent) = spent { - for pos in spent { - if let Some(out) = self.output_pmmr.get_data(pos.pos) { - batch.save_output_pos_height(&out.commitment(), pos)?; + for pos1 in spent { + if let Some(out) = self.output_pmmr.get_data(pos1.pos - 1) { + batch.save_output_pos_height(&out.commitment(), pos1)?; } } } @@ -1419,13 +1732,13 @@ impl<'a> Extension<'a> { let bitmap: Bitmap = spent_pos.iter().map(|x| *x as u32).collect(); self.output_pmmr .rewind(output_pos, &bitmap) - .map_err(|e| ErrorKind::TxHashSetErr(format!("output_pmmr rewind error, {}", e)))?; + .map_err(|e| Error::TxHashSetErr(format!("output_pmmr rewind error, {}", e)))?; self.rproof_pmmr .rewind(output_pos, &bitmap) - .map_err(|e| ErrorKind::TxHashSetErr(format!("rproof_pmmr rewind error, {}", e)))?; + .map_err(|e| Error::TxHashSetErr(format!("rproof_pmmr rewind error, {}", e)))?; self.kernel_pmmr .rewind(kernel_pos, &Bitmap::new()) - .map_err(|e| ErrorKind::TxHashSetErr(format!("kernel_pmmr rewind error, {}", e)))?; + .map_err(|e| Error::TxHashSetErr(format!("kernel_pmmr rewind error, {}", e)))?; Ok(()) } @@ -1433,21 +1746,9 @@ impl<'a> Extension<'a> { /// and kernel MMRs. pub fn roots(&self) -> Result { Ok(TxHashSetRoots { - output_roots: OutputRoots { - pmmr_root: self - .output_pmmr - .root() - .map_err(|e| ErrorKind::InvalidRoot(e))?, - bitmap_root: self.bitmap_accumulator.root(), - }, - rproof_root: self - .rproof_pmmr - .root() - .map_err(|e| ErrorKind::InvalidRoot(e))?, - kernel_root: self - .kernel_pmmr - .root() - .map_err(|e| ErrorKind::InvalidRoot(e))?, + output_root: self.output_pmmr.root().map_err(|e| Error::InvalidRoot(e))?, + rproof_root: self.rproof_pmmr.root().map_err(|e| Error::InvalidRoot(e))?, + kernel_root: self.kernel_pmmr.root().map_err(|e| Error::InvalidRoot(e))?, }) } @@ -1470,7 +1771,7 @@ impl<'a> Extension<'a> { header.kernel_mmr_size, ) != self.sizes() { - Err(ErrorKind::InvalidMMRSize.into()) + Err(Error::InvalidMMRSize) } else { Ok(()) } @@ -1481,13 +1782,13 @@ impl<'a> Extension<'a> { // validate all hashes and sums within the trees if let Err(e) = self.output_pmmr.validate() { - return Err(ErrorKind::InvalidTxHashSet(e).into()); + return Err(Error::InvalidTxHashSet(e)); } if let Err(e) = self.rproof_pmmr.validate() { - return Err(ErrorKind::InvalidTxHashSet(e).into()); + return Err(Error::InvalidTxHashSet(e)); } if let Err(e) = self.kernel_pmmr.validate() { - return Err(ErrorKind::InvalidTxHashSet(e).into()); + return Err(Error::InvalidTxHashSet(e)); } debug!( @@ -1501,7 +1802,8 @@ impl<'a> Extension<'a> { Ok(()) } - /// Validate full kernel sums against the provided header (for overage and kernel_offset). + /// Validate full kernel sums against the provided header and unspent output bitmap + /// (for overage and kernel_offset). /// This is an expensive operation as we need to retrieve all the UTXOs and kernels /// from the respective MMRs. /// For a significantly faster way of validating full kernel sums see BlockSums. @@ -1532,7 +1834,10 @@ impl<'a> Extension<'a> { genesis: &BlockHeader, fast_validation: bool, status: &dyn TxHashsetWriteStatus, + output_start_pos: Option, + _kernel_start_pos: Option, header: &BlockHeader, + stop_state: Option>, ) -> Result<(Commitment, Commitment), Error> { self.validate_mmrs()?; self.validate_roots(header)?; @@ -1550,10 +1855,26 @@ impl<'a> Extension<'a> { // These are expensive verification step (skipped for "fast validation"). if !fast_validation { // Verify the rangeproof associated with each unspent output. 
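// With PIBD these checks can resume from output_start_pos and bail out early
// through the shared stop_state: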
- self.verify_rangeproofs(status)?; + self.verify_rangeproofs( + Some(status), + output_start_pos, + None, + false, + stop_state.clone(), + )?; + if let Some(ref s) = stop_state { + if s.is_stopped() { + return Err(Error::Stopped.into()); + } + } // Verify all the kernel signatures. - self.verify_kernel_signatures(status)?; + self.verify_kernel_signatures(status, stop_state.clone())?; + if let Some(ref s) = stop_state { + if s.is_stopped() { + return Err(Error::Stopped.into()); + } + } } Ok((output_sum, kernel_sum)) @@ -1596,27 +1917,36 @@ impl<'a> Extension<'a> { ) } - fn verify_kernel_signatures(&self, status: &dyn TxHashsetWriteStatus) -> Result<(), Error> { + fn verify_kernel_signatures( + &self, + status: &dyn TxHashsetWriteStatus, + stop_state: Option>, + ) -> Result<(), Error> { let now = Instant::now(); const KERNEL_BATCH_SIZE: usize = 5_000; let mut kern_count = 0; let total_kernels = pmmr::n_leaves(self.kernel_pmmr.unpruned_size()); let mut tx_kernels: Vec = Vec::with_capacity(KERNEL_BATCH_SIZE); - for n in 1..self.kernel_pmmr.unpruned_size() + 1 { + for n in 0..self.kernel_pmmr.unpruned_size() { if pmmr::is_leaf(n) { let kernel = self .kernel_pmmr .get_data(n) - .ok_or_else(|| ErrorKind::TxKernelNotFound)?; + .ok_or_else(|| Error::TxKernelNotFound)?; tx_kernels.push(kernel); } - if tx_kernels.len() >= KERNEL_BATCH_SIZE || n >= self.kernel_pmmr.unpruned_size() { + if tx_kernels.len() >= KERNEL_BATCH_SIZE || n + 1 >= self.kernel_pmmr.unpruned_size() { TxKernel::batch_sig_verify(&tx_kernels)?; kern_count += tx_kernels.len() as u64; tx_kernels.clear(); status.on_validation_kernels(kern_count, total_kernels); + if let Some(ref s) = stop_state { + if s.is_stopped() { + return Ok(()); + } + } debug!( "txhashset: verify_kernel_signatures: verified {} signatures", kern_count, @@ -1634,35 +1964,53 @@ impl<'a> Extension<'a> { Ok(()) } - fn verify_rangeproofs(&self, status: &dyn TxHashsetWriteStatus) -> Result<(), Error> { + fn verify_rangeproofs( + &self, + status: Option<&dyn TxHashsetWriteStatus>, + start_pos: Option, + batch_size: Option, + single_iter: bool, + stop_state: Option>, + ) -> Result { let now = Instant::now(); - let mut commits: Vec = Vec::with_capacity(1_000); - let mut proofs: Vec = Vec::with_capacity(1_000); + let batch_size = batch_size.unwrap_or(1_000); + + let mut commits: Vec = Vec::with_capacity(batch_size); + let mut proofs: Vec = Vec::with_capacity(batch_size); let mut proof_count = 0; + if let Some(s) = start_pos { + if let Some(i) = pmmr::pmmr_leaf_to_insertion_index(s) { + proof_count = self.output_pmmr.n_unpruned_leaves_to_index(i) as usize; + } + } + let total_rproofs = self.output_pmmr.n_unpruned_leaves(); - for pos in self.output_pmmr.leaf_pos_iter() { - let output = self.output_pmmr.get_data(pos); - let proof = self.rproof_pmmr.get_data(pos); + for pos0 in self.output_pmmr.leaf_pos_iter() { + if let Some(p) = start_pos { + if pos0 < p { + continue; + } + } + let output = self.output_pmmr.get_data(pos0); + let proof = self.rproof_pmmr.get_data(pos0); // Output and corresponding rangeproof *must* exist. // It is invalid for either to be missing and we fail immediately in this case. 
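// Proofs are buffered and batch-verified once batch_size of them (1_000 by
// default) have accumulated.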
match (output, proof) {
(None, _) => {
- return Err(ErrorKind::OutputNotFound(format!(
+ return Err(Error::OutputNotFound(format!(
"at verify_rangeproofs for pos {}",
- pos
- ))
- .into())
+ pos0
+ )))
}
(_, None) => {
- return Err(ErrorKind::RangeproofNotFound(format!(
+ return Err(Error::RangeproofNotFound(format!(
"at verify_rangeproofs for pos {}",
- pos
- ))
- .into())
+ pos0
+ )))
}
(Some(output), Some(proof)) => {
commits.push(output.commit);
@@ -1672,7 +2020,7 @@

proof_count += 1;

- if proofs.len() >= 1_000 {
+ if proofs.len() >= batch_size {
Output::batch_verify_proofs(&commits, &proofs)?;
commits.clear();
proofs.clear();
@@ -1680,13 +2028,21 @@
"txhashset: verify_rangeproofs: verified {} rangeproofs",
proof_count,
);
- if proof_count % 1_000 == 0 {
- status.on_validation_rproofs(proof_count, total_rproofs);
+ if let Some(s) = status {
+ s.on_validation_rproofs(proof_count as u64, total_rproofs);
+ }
+ if let Some(ref s) = stop_state {
+ if s.is_stopped() {
+ return Ok(pos0);
+ }
+ }
+ if single_iter {
+ return Ok(pos0);
+ }
}
}

- // remaining part which not full of 1000 range proofs
+ // verify the remaining part, which holds fewer than batch_size range proofs
if !proofs.is_empty() {
Output::batch_verify_proofs(&commits, &proofs)?;
commits.clear();
@@ -1703,7 +2059,7 @@
self.rproof_pmmr.unpruned_size(),
now.elapsed().as_secs(),
);
- Ok(())
+ Ok(0)
}
}

@@ -1739,7 +2095,7 @@ pub fn zip_read(root_dir: String, header: &BlockHeader) -> Result<File, Error> {
}
}

- // otherwise, new the zip archive
+ // otherwise, create the zip archive
let path_to_be_cleanup = {
// Temp txhashset directory
let temp_txhashset_path = Path::new(&root_dir).join(format!(
@@ -1765,7 +2121,7 @@ pub fn zip_read(root_dir: String, header: &BlockHeader) -> Result<File, Error> {
};

debug!(
- "zip_read: {} at {}: newd zip file: {:?}",
+ "zip_read: {} at {}: created zip file: {:?}",
header.hash(),
header.height,
zip_path
@@ -1841,7 +2197,7 @@ pub fn txhashset_replace(from: PathBuf, to: PathBuf) -> Result<(), Error> {
// rename the 'from' folder as the 'to' folder
if let Err(e) = fs::rename(from.join(TXHASHSET_SUBDIR), to.join(TXHASHSET_SUBDIR)) {
error!("hashset_replace fail on {}. err: {}", TXHASHSET_SUBDIR, e);
- Err(ErrorKind::TxHashSetErr("txhashset replacing fail".to_string()).into())
+ Err(Error::TxHashSetErr("txhashset replacing fail".to_string()))
} else {
Ok(())
}
@@ -1899,7 +2255,7 @@ fn apply_kernel_rules(kernel: &TxKernel, pos: CommitPos, batch: &Batch<'_>) -> R
pos.height, prev, relative_height
);
if diff < relative_height.into() {
- return Err(ErrorKind::NRDRelativeHeight.into());
+ return Err(Error::NRDRelativeHeight);
}
}
debug!(
diff --git a/chain/src/txhashset/utxo_view.rs b/chain/src/txhashset/utxo_view.rs
index 4ed2a8422a..cb0ddf3ddf 100644
--- a/chain/src/txhashset/utxo_view.rs
+++ b/chain/src/txhashset/utxo_view.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 The Grin Developers
+// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,10 +15,10 @@
//! Lightweight readonly view into output MMR for convenience.
use crate::core::core::hash::{Hash, Hashed}; -use crate::core::core::pmmr::{self, ReadonlyPMMR}; +use crate::core::core::pmmr::{self, ReadablePMMR, ReadonlyPMMR}; use crate::core::core::{Block, BlockHeader, Inputs, Output, OutputIdentifier, Transaction}; use crate::core::global; -use crate::error::{Error, ErrorKind}; +use crate::error::Error; use crate::store::Batch; use crate::types::CommitPos; use crate::util::secp::pedersen::{Commitment, RangeProof}; @@ -104,7 +104,7 @@ impl<'a> UTXOView<'a> { Ok((out, pos)) } else { error!("input mismatch: {:?}, {:?}, {:?}", out, pos, input); - Err(ErrorKind::Other("input mismatch".into()).into()) + Err(Error::Other("input mismatch".into())) } }) }) @@ -123,28 +123,27 @@ impl<'a> UTXOView<'a> { batch: &Batch<'_>, ) -> Result<(OutputIdentifier, CommitPos), Error> { let pos = batch.get_output_pos_height(&input)?; - if let Some(pos) = pos { - if let Some(out) = self.output_pmmr.get_data(pos.pos) { + if let Some(pos1) = pos { + if let Some(out) = self.output_pmmr.get_data(pos1.pos - 1) { if out.commitment() == input { - return Ok((out, pos)); + return Ok((out, pos1)); } else { - error!("input mismatch: {:?}, {:?}, {:?}", out, pos, input); - return Err(ErrorKind::Other( + error!("input mismatch: {:?}, {:?}, {:?}", out, pos1, input); + return Err(Error::Other( "input mismatch (output_pos index mismatch?)".into(), - ) - .into()); + )); } } } - Err(ErrorKind::AlreadySpent(input).into()) + Err(Error::AlreadySpent(input)) } // Output is valid if it would not result in a duplicate commitment in the output MMR. fn validate_output(&self, output: &Output, batch: &Batch<'_>) -> Result<(), Error> { - if let Ok(pos) = batch.get_output_pos(&output.commitment()) { - if let Some(out_mmr) = self.output_pmmr.get_data(pos) { + if let Ok(pos0) = batch.get_output_pos(&output.commitment()) { + if let Some(out_mmr) = self.output_pmmr.get_data(pos0) { if out_mmr.commitment() == output.commitment() { - return Err(ErrorKind::DuplicateCommitment(output.commitment()).into()); + return Err(Error::DuplicateCommitment(output.commitment())); } } } @@ -152,13 +151,13 @@ impl<'a> UTXOView<'a> { } /// Retrieves an unspent output using its PMMR position - pub fn get_unspent_output_at(&self, pos: u64) -> Result { - match self.output_pmmr.get_data(pos) { - Some(output_id) => match self.rproof_pmmr.get_data(pos) { + pub fn get_unspent_output_at(&self, pos0: u64) -> Result { + match self.output_pmmr.get_data(pos0) { + Some(output_id) => match self.rproof_pmmr.get_data(pos0) { Some(rproof) => Ok(output_id.into_output(rproof)), - None => Err(ErrorKind::RangeproofNotFound(format!("at position {}", pos)).into()), + None => Err(Error::RangeproofNotFound(format!("at position {}", pos0))), }, - None => Err(ErrorKind::OutputNotFound(format!("at position {}", pos)).into()), + None => Err(Error::OutputNotFound(format!("at position {}", pos0))), } } @@ -194,7 +193,7 @@ impl<'a> UTXOView<'a> { // If we have not yet reached 1440 blocks then // we can fail immediately as coinbase cannot be mature. if height < global::coinbase_maturity() { - return Err(ErrorKind::ImmatureCoinbase.into()); + return Err(Error::ImmatureCoinbase); } // Find the "cutoff" pos in the output MMR based on the @@ -206,7 +205,7 @@ impl<'a> UTXOView<'a> { // If any output pos exceed the cutoff_pos // we know they have not yet sufficiently matured. 
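// (The cutoff is the output MMR size as of the maturity horizon; any position
// beyond it was created too recently to have matured.)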
if pos > cutoff_pos {
- return Err(ErrorKind::ImmatureCoinbase.into());
+ return Err(Error::ImmatureCoinbase);
}
}

@@ -214,8 +213,8 @@
}

/// Get the header hash for the specified pos from the underlying MMR backend.
- fn get_header_hash(&self, pos: u64) -> Option<Hash> {
- self.header_pmmr.get_data(pos).map(|x| x.hash())
+ fn get_header_hash(&self, pos1: u64) -> Option<Hash> {
+ self.header_pmmr.get_data(pos1 - 1).map(|x| x.hash())
}

/// Get the header at the specified height based on the current state of the extension.
@@ -226,12 +225,12 @@
height: u64,
batch: &Batch<'_>,
) -> Result<BlockHeader, Error> {
- let pos = pmmr::insertion_to_pmmr_index(height + 1);
- if let Some(hash) = self.get_header_hash(pos) {
+ let pos1 = 1 + pmmr::insertion_to_pmmr_index(height);
+ if let Some(hash) = self.get_header_hash(pos1) {
let header = batch.get_block_header(&hash)?;
Ok(header)
} else {
- Err(ErrorKind::Other(format!("get header for height {}", height)).into())
+ Err(Error::Other(format!("get header for height {}", height)))
}
}
}
diff --git a/chain/src/types.rs b/chain/src/types.rs
index b8229d9757..8899fc52e3 100644
--- a/chain/src/types.rs
+++ b/chain/src/types.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 The Grin Developers
+// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,12 +15,13 @@
//! Base types that the block chain pipeline requires.

use chrono::prelude::{DateTime, Utc};
+use chrono::Duration;

use crate::core::core::hash::{Hash, Hashed, ZERO_HASH};
-use crate::core::core::{Block, BlockHeader, HeaderVersion};
+use crate::core::core::{pmmr, Block, BlockHeader, SegmentTypeIdentifier};
use crate::core::pow::Difficulty;
-use crate::core::ser::{self, PMMRIndexHashable, Readable, Reader, Writeable, Writer};
-use crate::error::{Error, ErrorKind};
+use crate::core::ser::{self, Readable, Reader, Writeable, Writer};
+use crate::error::Error;
use crate::util::{RwLock, RwLockWriteGuard};

bitflags! {
@@ -38,7 +39,7 @@
}

/// Various status sync can be in, whether it's fast sync or archival.
-#[derive(Debug, Clone, Copy, Eq, PartialEq, Deserialize, Serialize)]
+#[derive(Debug, Clone, Copy, PartialEq, Deserialize, Serialize)]
pub enum SyncStatus {
/// Initial State (we do not yet know if we are/should be syncing)
Initial,
@@ -49,15 +50,45 @@
AwaitingPeers(bool),
/// Downloading block headers
HeaderSync {
- /// current node height
- current_height: u64,
+ /// current sync head
+ sync_head: Tip,
/// height of the most advanced peer
highest_height: u64,
+ /// diff of the most advanced peer
+ highest_diff: Difficulty,
+ },
+ /// Performing PIBD reconstruction of the txhashset.
+ /// If the PIBD syncer determines there are not enough
+ /// PIBD peers to continue, move on to the TxHashsetDownload state
+ TxHashsetPibd {
+ /// Whether the syncer has determined there is not enough
+ /// data to continue via PIBD
+ aborted: bool,
+ /// whether we got an error anywhere (in which case restart the process)
+ errored: bool,
+ /// total number of leaves applied
+ completed_leaves: u64,
+ /// total number of leaves required by archive header
+ leaves_required: u64,
+ /// 'height', i.e.
last 'block' for which there is complete + /// pmmr data + completed_to_height: u64, + /// Total 'height' needed + required_height: u64, }, /// Downloading the various txhashsets TxHashsetDownload(TxHashsetDownloadStats), /// Setting up before validation - TxHashsetSetup, + TxHashsetSetup { + /// number of 'headers' for which kernels have been checked + headers: Option, + /// headers total + headers_total: Option, + /// kernel position portion + kernel_pos: Option, + /// total kernel position + kernel_pos_total: Option, + }, /// Validating the kernels TxHashsetKernelsValidation { /// kernels validated @@ -117,10 +148,36 @@ impl Default for TxHashsetDownloadStats { } } +/// Container for entry in requested PIBD segments +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct PIBDSegmentContainer { + /// Segment+Type Identifier + pub identifier: SegmentTypeIdentifier, + /// Time at which this request was made + pub request_time: DateTime, +} + +impl PIBDSegmentContainer { + /// Return container with timestamp + pub fn new(identifier: SegmentTypeIdentifier) -> Self { + Self { + identifier, + request_time: Utc::now(), + } + } +} + /// Current sync state. Encapsulates the current SyncStatus. pub struct SyncState { current: RwLock, sync_error: RwLock>, + /// Something has to keep track of segments that have been + /// requested from other peers. TODO consider: This may not + /// be the best place to put code that's concerned with peers + /// but it's currently the only place that makes the info + /// available where it will be needed (both in the adapter + /// and the sync loop) + requested_pibd_segments: RwLock>, } impl SyncState { @@ -129,9 +186,16 @@ impl SyncState { SyncState { current: RwLock::new(SyncStatus::Initial), sync_error: RwLock::new(None), + requested_pibd_segments: RwLock::new(vec![]), } } + /// Reset sync status to NoSync. + pub fn reset(&self) { + self.clear_sync_error(); + self.update(SyncStatus::NoSync); + } + /// Whether the current state matches any active syncing operation. /// Note: This includes our "initial" state. pub fn is_syncing(&self) -> bool { @@ -176,11 +240,76 @@ impl SyncState { } } + /// Update sync_head if state is currently HeaderSync. + pub fn update_header_sync(&self, new_sync_head: Tip) { + let status: &mut SyncStatus = &mut self.current.write(); + match status { + SyncStatus::HeaderSync { sync_head, .. 
} => { + *sync_head = new_sync_head; + } + _ => (), + } + } + /// Update txhashset downloading progress pub fn update_txhashset_download(&self, stats: TxHashsetDownloadStats) { *self.current.write() = SyncStatus::TxHashsetDownload(stats); } + /// Update PIBD progress + pub fn update_pibd_progress( + &self, + aborted: bool, + errored: bool, + completed_leaves: u64, + completed_to_height: u64, + archive_header: &BlockHeader, + ) { + let leaves_required = pmmr::n_leaves(archive_header.output_mmr_size) * 2 + + pmmr::n_leaves(archive_header.kernel_mmr_size); + *self.current.write() = SyncStatus::TxHashsetPibd { + aborted, + errored, + completed_leaves, + leaves_required, + completed_to_height, + required_height: archive_header.height, + }; + } + + /// Update PIBD segment list + pub fn add_pibd_segment(&self, id: &SegmentTypeIdentifier) { + self.requested_pibd_segments + .write() + .push(PIBDSegmentContainer::new(id.clone())); + } + + /// Remove segment from list + pub fn remove_pibd_segment(&self, id: &SegmentTypeIdentifier) { + self.requested_pibd_segments + .write() + .retain(|i| &i.identifier != id); + } + + /// Remove segments with request timestamps less than cutoff time + pub fn remove_stale_pibd_requests(&self, timeout_seconds: i64) { + let cutoff_time = Utc::now() - Duration::seconds(timeout_seconds); + self.requested_pibd_segments.write().retain(|i| { + if i.request_time <= cutoff_time { + debug!("Removing + retrying PIBD request after timeout: {:?}", i) + }; + i.request_time > cutoff_time + }); + } + + /// Check whether segment is in request list + pub fn contains_pibd_segment(&self, id: &SegmentTypeIdentifier) -> bool { + self.requested_pibd_segments + .read() + .iter() + .any(|i| &i.identifier == id) + } + /// Communicate sync error pub fn set_sync_error(&self, error: Error) { *self.sync_error.write() = Some(error); @@ -198,8 +327,19 @@ impl SyncState { } impl TxHashsetWriteStatus for SyncState { - fn on_setup(&self) { - self.update(SyncStatus::TxHashsetSetup); + fn on_setup( + &self, + headers: Option, + headers_total: Option, + kernel_pos: Option, + kernel_pos_total: Option, + ) { + self.update(SyncStatus::TxHashsetSetup { + headers, + headers_total, + kernel_pos, + kernel_pos_total, + }); } fn on_validation_kernels(&self, kernels: u64, kernels_total: u64) { @@ -229,7 +369,7 @@ impl TxHashsetWriteStatus for SyncState { #[derive(Debug)] pub struct TxHashSetRoots { /// Output roots - pub output_roots: OutputRoots, + pub output_root: Hash, /// Range Proof root pub rproof_root: Hash, /// Kernel root @@ -237,37 +377,39 @@ pub struct TxHashSetRoots { } impl TxHashSetRoots { - /// Accessor for the output PMMR root (rules here are block height dependent). - /// We assume the header version is consistent with the block height, validated - /// as part of pipe::validate_header(). - pub fn output_root(&self, header: &BlockHeader) -> Hash { - self.output_roots.root(header) - } - /// Validate roots against the provided block header. pub fn validate(&self, header: &BlockHeader) -> Result<(), Error> { debug!( - "validate roots: {} at {}, {} vs. {} (original: {}, merged: {})", + "validate roots: {} at {}, {} vs. 
{}", header.hash(), header.height, header.output_root, - self.output_root(header), - self.output_roots.pmmr_root, - self.output_roots.merged_root(header), + self.output_root, ); - if header.output_root != self.output_root(header) { - Err(ErrorKind::InvalidRoot("Failed Output root validation".to_string()).into()) + if header.output_root != self.output_root { + Err(Error::InvalidRoot( + "Failed Output root validation".to_string(), + )) } else if header.range_proof_root != self.rproof_root { - Err(ErrorKind::InvalidRoot("Failed Range Proof root validation".to_string()).into()) + Err(Error::InvalidRoot( + "Failed Range Proof root validation".to_string(), + )) } else if header.kernel_root != self.kernel_root { - Err(ErrorKind::InvalidRoot("Failed Kernel root validation".to_string()).into()) + Err(Error::InvalidRoot( + "Failed Kernel root validation".to_string(), + )) } else { Ok(()) } } } +// Note, In MWC there is no merged root. The only purpose for the merge root is to simplify syncronization. +// In MWC sync process is not related to the blockchain. MWC using PIBD start handshake to receive +// the output bitmap root from the peers. In case if bitmap is forged, the whole process will fail at the end, +// node will detect that ban forger peers. +/* /// A helper for the various output roots. #[derive(Debug)] pub struct OutputRoots { @@ -301,7 +443,7 @@ impl OutputRoots { fn merged_root(&self, header: &BlockHeader) -> Hash { (self.pmmr_root, self.bitmap_root).hash_with_index(header.output_mmr_size) } -} +}*/ /// Minimal struct representing a known MMR position and associated block height. #[derive(Clone, Copy, Debug, PartialEq)] @@ -373,6 +515,18 @@ pub struct Tip { impl Tip { /// Creates a new tip based on provided header. pub fn from_header(header: &BlockHeader) -> Tip { + header.into() + } +} + +impl From for Tip { + fn from(header: BlockHeader) -> Self { + Self::from(&header) + } +} + +impl From<&BlockHeader> for Tip { + fn from(header: &BlockHeader) -> Self { Tip { height: header.height, last_block_h: header.hash(), @@ -441,7 +595,13 @@ pub trait ChainAdapter { /// those values as the processing progresses. pub trait TxHashsetWriteStatus { /// First setup of the txhashset - fn on_setup(&self); + fn on_setup( + &self, + headers: Option, + header_total: Option, + kernel_pos: Option, + kernel_pos_total: Option, + ); /// Starting kernel validation fn on_validation_kernels(&self, kernels: u64, kernel_total: u64); /// Starting rproof validation @@ -456,7 +616,7 @@ pub trait TxHashsetWriteStatus { pub struct NoStatus; impl TxHashsetWriteStatus for NoStatus { - fn on_setup(&self) {} + fn on_setup(&self, _hs: Option, _ht: Option, _kp: Option, _kpt: Option) {} fn on_validation_kernels(&self, _ks: u64, _kts: u64) {} fn on_validation_rproofs(&self, _rs: u64, _rt: u64) {} fn on_save(&self) {} diff --git a/chain/tests/bitmap_accumulator.rs b/chain/tests/bitmap_accumulator.rs index 5d48030389..39844b592f 100644 --- a/chain/tests/bitmap_accumulator.rs +++ b/chain/tests/bitmap_accumulator.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/chain/tests/bitmap_segment.rs b/chain/tests/bitmap_segment.rs new file mode 100644 index 0000000000..960625baf7 --- /dev/null +++ b/chain/tests/bitmap_segment.rs @@ -0,0 +1,85 @@ +use self::chain::txhashset::{BitmapAccumulator, BitmapSegment}; +use self::core::core::pmmr::segment::{Segment, SegmentIdentifier}; +use self::core::ser::{ + BinReader, BinWriter, DeserializationMode, ProtocolVersion, Readable, Writeable, +}; +use croaring::Bitmap; +use grin_chain as chain; +use grin_core as core; +use grin_util::secp::rand::Rng; +use rand::thread_rng; +use std::io::Cursor; + +fn test_roundtrip(entries: usize) { + let mut rng = thread_rng(); + + let identifier = SegmentIdentifier { + height: 12, + idx: rng.gen_range(8, 16), + }; + let block = rng.gen_range(2, 64); + + let mut bitmap = Bitmap::new(); + let block_size = 1 << 16; + let offset = (1 << identifier.height) * 1024 * identifier.idx + block_size * block; + let mut count = 0; + while count < entries { + let idx = (offset + rng.gen_range(0, block_size)) as u32; + if !bitmap.contains(idx) { + count += 1; + bitmap.add(idx); + } + } + + // Add a bunch of segments after the one we are interested in + let size = + bitmap.maximum().unwrap() as u64 + (1 << identifier.height) * 1024 * rng.gen_range(0, 64); + + // Construct the accumulator + let mut accumulator = BitmapAccumulator::new(); + accumulator + .init(bitmap.iter().map(|v| v as u64), size) + .unwrap(); + + let mmr = accumulator.readonly_pmmr(); + let segment = Segment::from_pmmr(identifier, &mmr, false).unwrap(); + + // Convert to `BitmapSegment` + let bms = BitmapSegment::from(segment.clone()); + + // Serialize `BitmapSegment` + let mut cursor = Cursor::new(Vec::::new()); + let mut writer = BinWriter::new(&mut cursor, ProtocolVersion(1)); + Writeable::write(&bms, &mut writer).unwrap(); + + // Read `BitmapSegment` + cursor.set_position(0); + let mut reader = BinReader::new( + &mut cursor, + ProtocolVersion(1), + DeserializationMode::default(), + ); + let bms2: BitmapSegment = Readable::read(&mut reader).unwrap(); + assert_eq!(bms, bms2); + + // Convert back to `Segment` + let segment2 = Segment::from(bms2); + assert_eq!(segment, segment2); +} + +#[test] +fn segment_ser_roundtrip() { + let threshold = 4096; + test_roundtrip(thread_rng().gen_range(threshold, 4 * threshold)); +} + +#[test] +fn sparse_segment_ser_roundtrip() { + test_roundtrip(thread_rng().gen_range(1024, 4096)); +} + +#[test] +fn abundant_segment_ser_roundtrip() { + let max = 1 << 16; + test_roundtrip(thread_rng().gen_range(max - 4096, max - 1024)); +} diff --git a/chain/tests/chain_test_helper.rs b/chain/tests/chain_test_helper.rs index 6b4ceae539..ac89063b15 100644 --- a/chain/tests/chain_test_helper.rs +++ b/chain/tests/chain_test_helper.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,34 +16,30 @@ use self::chain::types::NoopAdapter; use self::chain::types::Options; use self::chain::Chain; use self::core::core::hash::Hashed; -use self::core::core::verifier_cache::LruVerifierCache; use self::core::core::Block; use self::core::genesis; use self::core::global::ChainTypes; use self::core::libtx::{self, reward}; use self::core::{consensus, global, pow}; use self::keychain::{ExtKeychainPath, Keychain}; -use self::util::RwLock; use chrono::Duration; use grin_chain as chain; use grin_core as core; use grin_keychain as keychain; -use grin_util as util; use std::fs; use std::sync::Arc; +#[allow(dead_code)] pub fn clean_output_dir(dir_name: &str) { let _ = fs::remove_dir_all(dir_name); } pub fn init_chain(dir_name: &str, genesis: Block) -> Chain { - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); Chain::init( dir_name.to_string(), Arc::new(NoopAdapter {}), genesis, pow::verify_size, - verifier_cache, false, ) .unwrap() @@ -87,7 +83,8 @@ where { for n in 1..chain_length { let prev = chain.head_header().unwrap(); - let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap()); + let next_header_info = + consensus::next_difficulty(prev.height + 1, chain.difficulty_iter().unwrap()); let pk = ExtKeychainPath::new(1, n as u32, 0, 0, 0).to_identifier(); let reward = libtx::reward::output( keychain, @@ -98,8 +95,8 @@ where n, ) .unwrap(); - let mut b = core::core::Block::new(&prev, &[], next_header_info.clone().difficulty, reward) - .unwrap(); + let mut b = + core::core::Block::new(&prev, &[], next_header_info.difficulty, reward).unwrap(); b.header.timestamp = prev.timestamp + Duration::seconds(60); b.header.pow.secondary_scaling = next_header_info.secondary_scaling; diff --git a/chain/tests/data_file_integrity.rs b/chain/tests/data_file_integrity.rs index f9ae40612f..5a6f4bda46 100644 --- a/chain/tests/data_file_integrity.rs +++ b/chain/tests/data_file_integrity.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,10 +16,10 @@ use self::core::genesis; use grin_core as core; use grin_util as util; -mod chain_test_helper; - use self::chain_test_helper::{clean_output_dir, init_chain, mine_chain}; +mod chain_test_helper; + #[test] fn data_files() { util::init_test_logger(); diff --git a/chain/tests/mine_nrd_kernel.rs b/chain/tests/mine_nrd_kernel.rs index 59f2b68bfd..f30df11e13 100644 --- a/chain/tests/mine_nrd_kernel.rs +++ b/chain/tests/mine_nrd_kernel.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -33,14 +33,14 @@ where { let prev = chain.head_header().unwrap(); let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap()); - let fee = txs.iter().map(|x| x.fee()).sum(); + let fee = txs.iter().map(|x| x.fee(prev.height + 1)).sum(); let reward = reward::output( keychain, &ProofBuilder::new(keychain), key_id, fee, false, - 1, + prev.height + 1, ) .unwrap(); @@ -91,7 +91,7 @@ fn mine_block_with_nrd_kernel_and_nrd_feature_enabled() { let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier(); let tx = build::transaction( KernelFeatures::NoRecentDuplicate { - fee: 20000, + fee: 20000.into(), relative_height: NRDRelativeHeight::new(1440).unwrap(), }, &[ @@ -138,7 +138,7 @@ fn mine_invalid_block_with_nrd_kernel_and_nrd_feature_enabled_before_hf() { let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier(); let tx = build::transaction( KernelFeatures::NoRecentDuplicate { - fee: 20000, + fee: 20000.into(), relative_height: NRDRelativeHeight::new(1440).unwrap(), }, &[ diff --git a/chain/tests/mine_simple_chain.rs b/chain/tests/mine_simple_chain.rs index 5516289915..fb9e0e8334 100644 --- a/chain/tests/mine_simple_chain.rs +++ b/chain/tests/mine_simple_chain.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,13 +15,18 @@ use self::chain::types::{NoopAdapter, Tip}; use self::chain::Chain; use self::core::core::hash::Hashed; -use self::core::core::verifier_cache::LruVerifierCache; -use self::core::core::{Block, BlockHeader, KernelFeatures, Transaction}; +use self::core::core::{ + block, transaction, Block, BlockHeader, KernelFeatures, Output, OutputFeatures, Transaction, +}; use self::core::global::ChainTypes; -use self::core::libtx::{self, build, ProofBuilder}; +use self::core::libtx::build::{self, Append}; +use self::core::libtx::proof::{self, ProofBuild}; +use self::core::libtx::{self, Error, ProofBuilder}; use self::core::pow::Difficulty; use self::core::{consensus, global, pow}; -use self::keychain::{ExtKeychain, ExtKeychainPath, Keychain}; +use self::keychain::{ + BlindSum, ExtKeychain, ExtKeychainPath, Identifier, Keychain, SwitchCommitmentType, +}; use self::util::RwLock; use chrono::Duration; use grin_chain as chain; @@ -56,13 +61,11 @@ impl ChainAdapter for StatusAdapter { fn setup_with_status_adapter(dir_name: &str, genesis: Block, adapter: Arc) -> Chain { util::init_test_logger(); clean_output_dir(dir_name); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); let chain = chain::Chain::init( dir_name.to_string(), adapter, genesis, pow::verify_size, - verifier_cache, false, ) .unwrap(); @@ -569,7 +572,7 @@ fn spend_rewind_spend() { let key_id30 = ExtKeychainPath::new(1, 30, 0, 0, 0).to_identifier(); let tx1 = build::transaction( - KernelFeatures::Plain { fee: 20000 }, + KernelFeatures::Plain { fee: 20000.into() }, &[ build::coinbase_input(consensus::MWC_FIRST_GROUP_REWARD, key_id_coinbase.clone()), build::output(consensus::MWC_FIRST_GROUP_REWARD - 20000, key_id30.clone()), @@ -645,7 +648,7 @@ fn spend_in_fork_and_compact() { let key_id31 = ExtKeychainPath::new(1, 31, 0, 0, 0).to_identifier(); let tx1 = build::transaction( - KernelFeatures::Plain { fee: 20000 }, + KernelFeatures::Plain { fee: 20000.into() }, &[ build::coinbase_input(consensus::MWC_FIRST_GROUP_REWARD, key_id2.clone()), build::output(consensus::MWC_FIRST_GROUP_REWARD - 20000, 
key_id30.clone()), @@ -663,7 +666,7 @@ fn spend_in_fork_and_compact() { chain.validate(false).unwrap(); let tx2 = build::transaction( - KernelFeatures::Plain { fee: 20000 }, + KernelFeatures::Plain { fee: 20000.into() }, &[ build::input(consensus::MWC_FIRST_GROUP_REWARD - 20000, key_id30.clone()), build::output(consensus::MWC_FIRST_GROUP_REWARD - 40000, key_id31.clone()), @@ -762,7 +765,9 @@ fn spend_in_fork_and_compact() { /// Test ability to retrieve block headers for a given output #[test] fn output_header_mappings() { + clean_output_dir(".grin_header_for_output"); global::set_local_chain_type(ChainTypes::AutomatedTesting); + util::init_test_logger(); { clean_output_dir(".mwc_header_for_output"); let chain = init_chain(".mwc_header_for_output", pow::mine_genesis_block().unwrap()); @@ -771,7 +776,8 @@ fn output_header_mappings() { for n in 1..15 { let prev = chain.head_header().unwrap(); - let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap()); + let next_header_info = + consensus::next_difficulty(prev.height + 1, chain.difficulty_iter().unwrap()); let pk = ExtKeychainPath::new(1, n as u32, 0, 0, 0).to_identifier(); let reward = libtx::reward::output( &keychain, @@ -779,7 +785,7 @@ fn output_header_mappings() { &pk, 0, false, - n, + prev.height + 1, ) .unwrap(); reward_outputs.push(reward.0.clone()); @@ -809,7 +815,7 @@ fn output_header_mappings() { chain.process_block(b, chain::Options::MINE).unwrap(); let header_for_output = chain - .get_header_for_output(reward_outputs[n as usize - 1].commitment()) + .get_header_for_output(reward_outputs[n - 1].commitment()) .unwrap(); assert_eq!(header_for_output.height, n as u64); @@ -828,6 +834,158 @@ fn output_header_mappings() { clean_output_dir(".mwc_header_for_output"); } +/// Build a negative output. This function must not be used outside of tests. +/// The commitment will be an inversion of the value passed in and the value is +/// subtracted from the sum. 
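+/// (Sketch of the algebra, assuming grin's Pedersen commitments C = r*G + v*H:
+/// secp's commit_sum(vec![], vec![C]) yields -C, so the output contributes -C while
+/// the blinding sum subtracts v, keeping the overall kernel equation balanced.)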
+fn build_output_negative(value: u64, key_id: Identifier) -> Box> +where + K: Keychain, + B: ProofBuild, +{ + Box::new( + move |build, acc| -> Result<(Transaction, BlindSum), Error> { + let (tx, sum) = acc?; + + // TODO: proper support for different switch commitment schemes + let switch = SwitchCommitmentType::Regular; + + let commit = build.keychain.commit(value, &key_id, switch)?; + + // invert commitment + let commit = build.keychain.secp().commit_sum(vec![], vec![commit])?; + + eprintln!("Building output: {}, {:?}", value, commit); + + // build a proof with a rangeproof of 0 as a placeholder + // the test will replace this later + let proof = proof::create( + build.keychain, + build.builder, + 0, + &key_id, + switch, + commit, + None, + )?; + + // we return the output and the value is subtracted instead of added + Ok(( + tx.with_output(Output::new(OutputFeatures::Plain, commit, proof)), + sum.sub_key_id(key_id.to_value_path(value)), + )) + }, + ) +} + +/// Test the duplicate rangeproof bug +#[test] +fn test_overflow_cached_rangeproof() { + clean_output_dir(".grin_overflow"); + global::set_local_chain_type(ChainTypes::AutomatedTesting); + + util::init_test_logger(); + { + let chain = init_chain(".grin_overflow", pow::mine_genesis_block().unwrap()); + let prev = chain.head_header().unwrap(); + let kc = ExtKeychain::from_random_seed(false).unwrap(); + let pb = ProofBuilder::new(&kc); + + let mut head = prev; + + // mine the first block and keep track of the block_hash + // so we can spend the coinbase later + let b = prepare_block(&kc, &head, &chain, 2); + + assert!(b.outputs()[0].is_coinbase()); + head = b.header.clone(); + chain + .process_block(b.clone(), chain::Options::SKIP_POW) + .unwrap(); + + // now mine three further blocks + for n in 3..6 { + let b = prepare_block(&kc, &head, &chain, n); + head = b.header.clone(); + chain.process_block(b, chain::Options::SKIP_POW).unwrap(); + } + + // create a few keys for use in txns + let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier(); + let key_id30 = ExtKeychainPath::new(1, 30, 0, 0, 0).to_identifier(); + let key_id31 = ExtKeychainPath::new(1, 31, 0, 0, 0).to_identifier(); + let key_id32 = ExtKeychainPath::new(1, 32, 0, 0, 0).to_identifier(); + + // build a regular transaction so we have a rangeproof to copy + let tx1 = build::transaction( + KernelFeatures::Plain { fee: 20000.into() }, + &[ + build::coinbase_input( + consensus::calc_mwc_block_reward(chain.head().unwrap().height), + key_id2.clone(), + ), + build::output( + consensus::calc_mwc_block_reward(chain.head().unwrap().height) - 20000, + key_id30.clone(), + ), + ], + &kc, + &pb, + ) + .unwrap(); + + // mine block with tx1 + let next = prepare_block_tx(&kc, &head, &chain, 7, &[tx1.clone()]); + let prev_main = next.header.clone(); + chain + .process_block(next.clone(), chain::Options::SKIP_POW) + .unwrap(); + chain.validate(false).unwrap(); + + // create a second tx that contains a negative output + // and a positive output for 1m grin + let mut tx2 = build::transaction( + KernelFeatures::Plain { fee: 0.into() }, + &[ + build::input( + consensus::calc_mwc_block_reward(chain.head().unwrap().height) - 20000, + key_id30.clone(), + ), + build::output( + consensus::calc_mwc_block_reward(chain.head().unwrap().height) - 20000 + + 1_000_000_000_000_000, + key_id31.clone(), + ), + build_output_negative(1_000_000_000_000_000, key_id32.clone()), + ], + &kc, + &pb, + ) + .unwrap(); + + // make sure tx1 only has one output as expected + assert_eq!(tx1.body.outputs.len(), 1); + let 
last_rp = tx1.body.outputs[0].proof; + + // overwrite all our rangeproofs with the rangeproof from last block + for i in 0..tx2.body.outputs.len() { + tx2.body.outputs[i].proof = last_rp; + } + + let next = prepare_block_tx(&kc, &prev_main, &chain, 8, &[tx2.clone()]); + // process_block fails with verifier_cache disabled or with correct verifier_cache + // implementations + let res = chain.process_block(next, chain::Options::SKIP_POW); + + assert_eq!( + res.unwrap_err(), + chain::Error::Block(block::Error::Transaction(transaction::Error::Secp( + util::secp::Error::InvalidRangeProof + ))) + ); + } + clean_output_dir(".grin_overflow"); +} + // Use diff as both diff *and* key_idx for convenience (deterministic private key for test blocks) fn prepare_block(kc: &K, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block where @@ -896,7 +1054,8 @@ where let proof_size = global::proofsize(); let key_id = ExtKeychainPath::new(1, key_idx, 0, 0, 0).to_identifier(); - let fees = txs.iter().map(|tx| tx.fee()).sum(); + let height = prev.height + 1; + let fees = txs.iter().map(|tx| tx.fee(height)).sum(); let reward = libtx::reward::output( kc, &libtx::ProofBuilder::new(kc), @@ -921,13 +1080,11 @@ where fn actual_diff_iter_output() { global::set_local_chain_type(ChainTypes::AutomatedTesting); let genesis_block = pow::mine_genesis_block().unwrap(); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); let chain = chain::Chain::init( "../.mwc".to_string(), Arc::new(NoopAdapter {}), genesis_block, pow::verify_size, - verifier_cache, false, ) .unwrap(); diff --git a/chain/tests/nrd_validation_rules.rs b/chain/tests/nrd_validation_rules.rs index c52d357c9e..251ffefba1 100644 --- a/chain/tests/nrd_validation_rules.rs +++ b/chain/tests/nrd_validation_rules.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -54,7 +54,7 @@ where { let next_header_info = consensus::next_difficulty(prev.height, chain.difficulty_iter().unwrap()); - let fee = txs.iter().map(|x| x.fee()).sum(); + let fee = txs.iter().map(|x| x.fee(prev.height + 1)).sum(); let reward = reward::output( keychain, &ProofBuilder::new(keychain), @@ -65,7 +65,8 @@ where ) .unwrap(); - let mut block = Block::new(prev, &txs, next_header_info.clone().difficulty, reward)?; + let mut block = Block::new(prev, &txs, next_header_info.clone().difficulty, reward) + .map_err(|e| Error::Block(e))?; block.header.timestamp = prev.timestamp + Duration::seconds(60); block.header.pow.secondary_scaling = next_header_info.secondary_scaling; @@ -107,7 +108,7 @@ fn process_block_nrd_validation() -> Result<(), Error> { assert_eq!(chain.head()?.height, 8); let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate { - fee: 20000, + fee: 20000.into(), relative_height: NRDRelativeHeight::new(2)?, }); @@ -115,10 +116,10 @@ fn process_block_nrd_validation() -> Result<(), Error> { let msg = kernel.msg_to_sign().unwrap(); // // Generate a kernel with public excess and associated signature. 
- let excess = BlindingFactor::rand(); - let skey = excess.secret_key().unwrap(); + let excess = BlindingFactor::rand(keychain.secp()); + let skey = excess.secret_key(keychain.secp()).unwrap(); kernel.excess = keychain.secp().commit(0, skey).unwrap(); - let pubkey = &kernel.excess.to_pubkey().unwrap(); + let pubkey = &kernel.excess.to_pubkey(keychain.secp()).unwrap(); kernel.excess_sig = aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap(); kernel.verify().unwrap(); @@ -223,7 +224,7 @@ fn process_block_nrd_validation_relative_height_1() -> Result<(), Error> { assert_eq!(chain.head()?.height, 8); let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate { - fee: 20000, + fee: 20000.into(), relative_height: NRDRelativeHeight::new(1)?, }); @@ -231,10 +232,10 @@ fn process_block_nrd_validation_relative_height_1() -> Result<(), Error> { let msg = kernel.msg_to_sign().unwrap(); // // Generate a kernel with public excess and associated signature. - let excess = BlindingFactor::rand(); - let skey = excess.secret_key().unwrap(); + let excess = BlindingFactor::rand(keychain.secp()); + let skey = excess.secret_key(keychain.secp()).unwrap(); kernel.excess = keychain.secp().commit(0, skey).unwrap(); - let pubkey = &kernel.excess.to_pubkey().unwrap(); + let pubkey = &kernel.excess.to_pubkey(keychain.secp()).unwrap(); kernel.excess_sig = aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap(); kernel.verify().unwrap(); @@ -322,7 +323,7 @@ fn process_block_nrd_validation_fork() -> Result<(), Error> { assert_eq!(header_8.height, 8); let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate { - fee: 20000, + fee: 20000.into(), relative_height: NRDRelativeHeight::new(2)?, }); @@ -330,10 +331,10 @@ fn process_block_nrd_validation_fork() -> Result<(), Error> { let msg = kernel.msg_to_sign().unwrap(); // // Generate a kernel with public excess and associated signature. - let excess = BlindingFactor::rand(); - let skey = excess.secret_key().unwrap(); + let excess = BlindingFactor::rand(keychain.secp()); + let skey = excess.secret_key(keychain.secp()).unwrap(); kernel.excess = keychain.secp().commit(0, skey).unwrap(); - let pubkey = &kernel.excess.to_pubkey().unwrap(); + let pubkey = &kernel.excess.to_pubkey(keychain.secp()).unwrap(); kernel.excess_sig = aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap(); kernel.verify().unwrap(); diff --git a/chain/tests/process_block_cut_through.rs b/chain/tests/process_block_cut_through.rs index ed794f3d16..1bbb0a2a8e 100644 --- a/chain/tests/process_block_cut_through.rs +++ b/chain/tests/process_block_cut_through.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
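The same kernel-building sequence is repeated in all three NRD tests above. Condensed into one helper, and hedged as a sketch (the crate paths and call signatures are taken from the diff itself, so treat this as a sketch against the MWC test crates rather than a standalone program), the new context-threading API looks like this:

```rust
use grin_core::core::{KernelFeatures, NRDRelativeHeight, TxKernel};
use grin_core::libtx::aggsig;
use grin_keychain::{BlindingFactor, ExtKeychain, Keychain};

// Build a standalone NRD kernel with a random public excess, mirroring the
// three hunks above.
fn build_nrd_kernel(keychain: &ExtKeychain) -> TxKernel {
    let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate {
        fee: 20000.into(),
        relative_height: NRDRelativeHeight::new(2).unwrap(),
    });
    let msg = kernel.msg_to_sign().unwrap();

    // The secp context is now threaded through explicitly instead of each
    // helper creating or capturing its own.
    let excess = BlindingFactor::rand(keychain.secp());
    let skey = excess.secret_key(keychain.secp()).unwrap();
    kernel.excess = keychain.secp().commit(0, skey).unwrap();
    let pubkey = kernel.excess.to_pubkey(keychain.secp()).unwrap();
    kernel.excess_sig =
        aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap();
    kernel.verify().unwrap();
    kernel
}
```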
@@ -21,15 +21,12 @@ use grin_util as util; use self::chain_test_helper::{clean_output_dir, genesis_block, init_chain}; use crate::chain::{pipe, Chain, Options}; -use crate::core::core::verifier_cache::LruVerifierCache; use crate::core::core::{block, pmmr, transaction}; -use crate::core::core::{Block, KernelFeatures, Transaction, Weighting}; +use crate::core::core::{Block, FeeFields, KernelFeatures, Transaction, Weighting}; use crate::core::libtx::{build, reward, ProofBuilder}; use crate::core::{consensus, global, pow}; use crate::keychain::{ExtKeychain, ExtKeychainPath, Keychain, SwitchCommitmentType}; -use crate::util::RwLock; use chrono::Duration; -use std::sync::Arc; fn build_block<K>( chain: &Chain, @@ -43,7 +40,7 @@ where let prev = chain.head_header().unwrap(); let next_height = prev.height + 1; let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter()?); - let fee = txs.iter().map(|x| x.fee()).sum(); + let fee = txs.iter().map(|x| x.fee(next_height)).sum(); let key_id = ExtKeychainPath::new(1, next_height as u32, 0, 0, 0).to_identifier(); let reward = reward::output( keychain, @@ -55,7 +52,8 @@ where ) .unwrap(); - let mut block = Block::new(&prev, txs, next_header_info.clone().difficulty, reward)?; + let mut block = Block::new(&prev, txs, next_header_info.clone().difficulty, reward) + .map_err(|e| chain::Error::Block(e))?; block.header.timestamp = prev.timestamp + Duration::seconds(60); block.header.pow.secondary_scaling = next_header_info.secondary_scaling; @@ -66,6 +64,9 @@ where chain.set_prev_root_only(&mut block.header)?; // Manually set the mmr sizes for a "valid" block (increment prev output and kernel counts). + // The two lines below were bogus with the old 1-based positions: they only + // worked for even output_mmr_count() values. + // With 0-based positions they are actually correct. block.header.output_mmr_size = pmmr::insertion_to_pmmr_index(prev.output_mmr_count() + 1); block.header.kernel_mmr_size = pmmr::insertion_to_pmmr_index(prev.kernel_mmr_count() + 1); } else { @@ -111,7 +112,9 @@ fn process_block_cut_through() -> Result<(), chain::Error> { // Note: We reuse key_ids resulting in an input and an output sharing the same commitment. // The input is coinbase and the output is plain. let tx = build::transaction( - KernelFeatures::Plain { fee: 0 }, + KernelFeatures::Plain { + fee: FeeFields::zero(), + }, &[ build::coinbase_input(consensus::MWC_FIRST_GROUP_REWARD, key_id1.clone()), build::coinbase_input(consensus::MWC_FIRST_GROUP_REWARD, key_id2.clone()), @@ -137,18 +140,17 @@ fn process_block_cut_through() -> Result<(), chain::Error> { .iter() .any(|output| output.commitment() == commit)); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); - // Transaction is invalid due to cut-through. + let height = 7; assert_eq!( - tx.validate(Weighting::AsTransaction, verifier_cache.clone()), + tx.validate(Weighting::AsTransaction, height), Err(transaction::Error::CutThrough), ); // Transaction will not validate against the chain (utxo). assert_eq!( - chain.validate_tx(&tx).map_err(|e| e.kind()), - Err(chain::ErrorKind::DuplicateCommitment(commit)), + chain.validate_tx(&tx), + Err(chain::Error::DuplicateCommitment(commit)), ); // Build a block with this single invalid transaction. @@ -157,7 +159,7 @@ fn process_block_cut_through() -> Result<(), chain::Error> { // The block is invalid due to cut-through.
let prev = chain.head_header()?; assert_eq!( - block.validate(&prev.total_kernel_offset(), verifier_cache), + block.validate(&prev.total_kernel_offset()), Err(block::Error::Transaction(transaction::Error::CutThrough)) ); @@ -177,12 +179,12 @@ fn process_block_cut_through() -> Result<(), chain::Error> { let batch = store.batch()?; let mut ctx = chain.new_ctx(Options::NONE, batch, &mut header_pmmr, &mut txhashset)?; - let res = pipe::process_block(&block, &mut ctx).map_err(|e| e.kind()); + let res = pipe::process_block(&block, &mut ctx); assert_eq!( res, - Err(chain::ErrorKind::InvalidBlockProof( - block::Error::Transaction(transaction::Error::CutThrough) - )) + Err(chain::Error::Block(block::Error::Transaction( + transaction::Error::CutThrough + ))) ); } diff --git a/chain/tests/store_indices.rs b/chain/tests/store_indices.rs index 6443063b7c..c0ad867f46 100644 --- a/chain/tests/store_indices.rs +++ b/chain/tests/store_indices.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index 1b74502c52..664e2818c4 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/chain/tests/test_block_known.rs b/chain/tests/test_block_known.rs index 937756a6f5..bdab3a818f 100644 --- a/chain/tests/test_block_known.rs +++ b/chain/tests/test_block_known.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ mod chain_test_helper; use self::chain_test_helper::{clean_output_dir, init_chain, mine_chain}; -use chain::ErrorKind; +use chain::Error; use chain::Tip; use grin_chain as chain; use grin_core::core::hash::Hashed; @@ -42,8 +42,8 @@ fn check_known() { let chain = init_chain(chain_dir, genesis.clone()); let res = chain.process_block(latest.clone(), chain::Options::NONE); assert_eq!( - res.unwrap_err().kind(), - ErrorKind::Unfit("duplicate block".to_string()).into() + res.unwrap_err(), + Error::Unfit("duplicate block".to_string()) ); } @@ -52,8 +52,8 @@ fn check_known() { let chain = init_chain(chain_dir, genesis.clone()); let res = chain.process_block(genesis.clone(), chain::Options::NONE); assert_eq!( - res.unwrap_err().kind(), - ErrorKind::Unfit("duplicate block".to_string()).into() + res.unwrap_err(), + Error::Unfit("duplicate block".to_string()) ); } diff --git a/chain/tests/test_coinbase_maturity.rs b/chain/tests/test_coinbase_maturity.rs index 298c2a0e4a..90c2912b09 100644 --- a/chain/tests/test_coinbase_maturity.rs +++ b/chain/tests/test_coinbase_maturity.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,17 +13,13 @@ // limitations under the License. 
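For the cut-through rejection exercised in process_block_cut_through above, the essential check reduces to a set intersection between input and output commitments. A self-contained sketch (commitments reduced to plain byte arrays; names are illustrative):

```rust
use std::collections::HashSet;

// A transaction whose input set and output set share a commitment must be
// rejected (CutThrough), even though the value sums would still balance.
fn has_cut_through(inputs: &[[u8; 33]], outputs: &[[u8; 33]]) -> bool {
    let outs: HashSet<&[u8; 33]> = outputs.iter().collect();
    inputs.iter().any(|c| outs.contains(c))
}

fn main() {
    let c1 = [1u8; 33];
    let c2 = [2u8; 33];
    // Reusing a key id makes the coinbase input and the plain output commit
    // to the same point, exactly the situation the test constructs.
    assert!(has_cut_through(&[c1, c2], &[c1]));
    assert!(!has_cut_through(&[c1], &[c2]));
}
```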
use self::chain::types::NoopAdapter; -use self::chain::ErrorKind; -use self::core::core::verifier_cache::LruVerifierCache; +use self::chain::Error; use self::core::core::KernelFeatures; use self::core::global::{self, ChainTypes}; use self::core::libtx::{self, build, ProofBuilder}; -use self::core::pow::Difficulty; use self::core::{consensus, pow}; use self::keychain::{ExtKeychain, ExtKeychainPath, Keychain}; -use self::util::RwLock; use chrono::Duration; -use env_logger; use grin_chain as chain; use grin_core as core; use grin_keychain as keychain; @@ -37,22 +33,19 @@ fn clean_output_dir(dir_name: &str) { #[test] fn test_coinbase_maturity() { - let _ = env_logger::init(); + util::init_test_logger(); let chain_dir = ".mwc_coinbase"; clean_output_dir(chain_dir); global::set_local_chain_type(ChainTypes::AutomatedTesting); let genesis_block = pow::mine_genesis_block().unwrap(); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); - { let chain = chain::Chain::init( chain_dir.to_string(), Arc::new(NoopAdapter {}), genesis_block, pow::verify_size, - verifier_cache, false, ) .unwrap(); @@ -66,9 +59,11 @@ fn test_coinbase_maturity() { let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier(); let key_id4 = ExtKeychainPath::new(1, 4, 0, 0, 0).to_identifier(); - let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap()); + let next_header_info = + consensus::next_difficulty(prev.height + 1, chain.difficulty_iter().unwrap()); let reward = libtx::reward::output(&keychain, &builder, &key_id1, 0, false, 1).unwrap(); - let mut block = core::core::Block::new(&prev, &[], Difficulty::min(), reward).unwrap(); + let mut block = + core::core::Block::new(&prev, &[], next_header_info.difficulty, reward).unwrap(); block.header.timestamp = prev.timestamp + Duration::seconds(60); block.header.pow.secondary_scaling = next_header_info.secondary_scaling; @@ -100,7 +95,7 @@ fn test_coinbase_maturity() { // here we build a tx that attempts to spend the earlier coinbase output // this is not a valid tx as the coinbase output cannot be spent yet let coinbase_txn = build::transaction( - KernelFeatures::Plain { fee: 2 }, + KernelFeatures::Plain { fee: 2.into() }, &[ build::coinbase_input(amount, key_id1.clone()), build::output(amount - 2, key_id2.clone()), @@ -111,10 +106,14 @@ fn test_coinbase_maturity() { .unwrap(); let txs = &[coinbase_txn.clone()]; - let fees = txs.iter().map(|tx| tx.fee()).sum(); - let reward = libtx::reward::output(&keychain, &builder, &key_id3, fees, false, 1).unwrap(); - let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap(); - let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap()); + let fees = txs.iter().map(|tx| tx.fee(prev.height + 1)).sum(); + let reward = + libtx::reward::output(&keychain, &builder, &key_id3, fees, false, prev.height + 1) + .unwrap(); + let next_header_info = + consensus::next_difficulty(prev.height + 1, chain.difficulty_iter().unwrap()); + let mut block = + core::core::Block::new(&prev, txs, next_header_info.difficulty, reward).unwrap(); block.header.timestamp = prev.timestamp + Duration::seconds(60); block.header.pow.secondary_scaling = next_header_info.secondary_scaling; @@ -124,8 +123,8 @@ fn test_coinbase_maturity() { // is not valid at the current block height given the current chain state. 
match chain.verify_coinbase_maturity(&coinbase_txn.inputs()) { Ok(_) => {} - Err(e) => match e.kind() { - ErrorKind::ImmatureCoinbase => {} + Err(e) => match e { + Error::ImmatureCoinbase => {} _ => panic!("Expected transaction error with immature coinbase."), }, } @@ -147,9 +146,13 @@ fn test_coinbase_maturity() { let builder = ProofBuilder::new(&keychain); let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier(); - let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap()); - let reward = libtx::reward::output(&keychain, &builder, &key_id1, 0, false, 1).unwrap(); - let mut block = core::core::Block::new(&prev, &[], Difficulty::min(), reward).unwrap(); + let next_header_info = + consensus::next_difficulty(prev.height + 1, chain.difficulty_iter().unwrap()); + let reward = + libtx::reward::output(&keychain, &builder, &key_id1, 0, false, prev.height + 1) + .unwrap(); + let mut block = + core::core::Block::new(&prev, &[], next_header_info.difficulty, reward).unwrap(); block.header.timestamp = prev.timestamp + Duration::seconds(60); block.header.pow.secondary_scaling = next_header_info.secondary_scaling; @@ -182,7 +185,7 @@ fn test_coinbase_maturity() { // here we build a tx that attempts to spend the earlier coinbase output // this is not a valid tx as the coinbase output cannot be spent yet let coinbase_txn = build::transaction( - KernelFeatures::Plain { fee: 2 }, + KernelFeatures::Plain { fee: 2.into() }, &[ build::coinbase_input(amount, key_id1.clone()), build::output(amount - 2, key_id2.clone()), @@ -193,11 +196,14 @@ fn test_coinbase_maturity() { .unwrap(); let txs = &[coinbase_txn.clone()]; - let fees = txs.iter().map(|tx| tx.fee()).sum(); + let fees = txs.iter().map(|tx| tx.fee(prev.height + 1)).sum(); let reward = - libtx::reward::output(&keychain, &builder, &key_id3, fees, false, 1).unwrap(); - let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap(); - let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap()); + libtx::reward::output(&keychain, &builder, &key_id3, fees, false, prev.height + 1) + .unwrap(); + let next_header_info = + consensus::next_difficulty(prev.height + 1, chain.difficulty_iter().unwrap()); + let mut block = + core::core::Block::new(&prev, txs, next_header_info.difficulty, reward).unwrap(); block.header.timestamp = prev.timestamp + Duration::seconds(60); block.header.pow.secondary_scaling = next_header_info.secondary_scaling; @@ -207,8 +213,8 @@ fn test_coinbase_maturity() { // is not valid at the current block height given the current chain state. 
match chain.verify_coinbase_maturity(&coinbase_txn.inputs()) { Ok(_) => {} - Err(e) => match e.kind() { - ErrorKind::ImmatureCoinbase => {} + Err(e) => match e { + Error::ImmatureCoinbase => {} _ => panic!("Expected transaction error with immature coinbase."), }, } @@ -230,11 +236,14 @@ fn test_coinbase_maturity() { let builder = ProofBuilder::new(&keychain); let pk = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier(); - let reward = libtx::reward::output(&keychain, &builder, &pk, 0, false, 1).unwrap(); - let mut block = - core::core::Block::new(&prev, &[], Difficulty::min(), reward).unwrap(); + let reward = + libtx::reward::output(&keychain, &builder, &pk, 0, false, prev.height + 1) + .unwrap(); let next_header_info = - consensus::next_difficulty(1, chain.difficulty_iter().unwrap()); + consensus::next_difficulty(prev.height + 1, chain.difficulty_iter().unwrap()); + let mut block = + core::core::Block::new(&prev, &[], next_header_info.difficulty, reward) + .unwrap(); block.header.timestamp = prev.timestamp + Duration::seconds(60); block.header.pow.secondary_scaling = next_header_info.secondary_scaling; @@ -260,11 +269,14 @@ fn test_coinbase_maturity() { .unwrap(); let txs = &[coinbase_txn]; - let fees = txs.iter().map(|tx| tx.fee()).sum(); - let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap()); + let fees = txs.iter().map(|tx| tx.fee(prev.height + 1)).sum(); + let next_header_info = + consensus::next_difficulty(prev.height + 1, chain.difficulty_iter().unwrap()); let reward = - libtx::reward::output(&keychain, &builder, &key_id4, fees, false, 1).unwrap(); - let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap(); + libtx::reward::output(&keychain, &builder, &key_id4, fees, false, prev.height + 1) + .unwrap(); + let mut block = + core::core::Block::new(&prev, txs, next_header_info.difficulty, reward).unwrap(); block.header.timestamp = prev.timestamp + Duration::seconds(60); block.header.pow.secondary_scaling = next_header_info.secondary_scaling; diff --git a/chain/tests/test_data/chain_compacted/header/header_head/pmmr_data.bin b/chain/tests/test_data/chain_compacted/header/header_head/pmmr_data.bin new file mode 100644 index 0000000000..fc4b2f52e3 Binary files /dev/null and b/chain/tests/test_data/chain_compacted/header/header_head/pmmr_data.bin differ diff --git a/chain/tests/test_data/chain_compacted/header/header_head/pmmr_hash.bin b/chain/tests/test_data/chain_compacted/header/header_head/pmmr_hash.bin new file mode 100644 index 0000000000..741ecfb1ec Binary files /dev/null and b/chain/tests/test_data/chain_compacted/header/header_head/pmmr_hash.bin differ diff --git a/chain/tests/test_data/chain_compacted/lmdb/data.mdb b/chain/tests/test_data/chain_compacted/lmdb/data.mdb new file mode 100644 index 0000000000..e69de29bb2 diff --git a/chain/tests/test_data/chain_compacted/lmdb/lock.mdb b/chain/tests/test_data/chain_compacted/lmdb/lock.mdb new file mode 100644 index 0000000000..e69de29bb2 diff --git a/chain/tests/test_data/chain_compacted/txhashset/kernel/pmmr_data.bin b/chain/tests/test_data/chain_compacted/txhashset/kernel/pmmr_data.bin new file mode 100644 index 0000000000..4e24d88ac6 Binary files /dev/null and b/chain/tests/test_data/chain_compacted/txhashset/kernel/pmmr_data.bin differ diff --git a/chain/tests/test_data/chain_compacted/txhashset/kernel/pmmr_hash.bin b/chain/tests/test_data/chain_compacted/txhashset/kernel/pmmr_hash.bin new file mode 100644 index 0000000000..6202e40e98 Binary files /dev/null and 
b/chain/tests/test_data/chain_compacted/txhashset/kernel/pmmr_hash.bin differ diff --git a/chain/tests/test_data/chain_compacted/txhashset/kernel/pmmr_size.bin b/chain/tests/test_data/chain_compacted/txhashset/kernel/pmmr_size.bin new file mode 100644 index 0000000000..bba2b62391 Binary files /dev/null and b/chain/tests/test_data/chain_compacted/txhashset/kernel/pmmr_size.bin differ diff --git a/chain/tests/test_data/chain_compacted/txhashset/output/pmmr_data.bin b/chain/tests/test_data/chain_compacted/txhashset/output/pmmr_data.bin new file mode 100644 index 0000000000..eb7aaec6af Binary files /dev/null and b/chain/tests/test_data/chain_compacted/txhashset/output/pmmr_data.bin differ diff --git a/chain/tests/test_data/chain_compacted/txhashset/output/pmmr_hash.bin b/chain/tests/test_data/chain_compacted/txhashset/output/pmmr_hash.bin new file mode 100644 index 0000000000..cadd9a2d37 Binary files /dev/null and b/chain/tests/test_data/chain_compacted/txhashset/output/pmmr_hash.bin differ diff --git a/chain/tests/test_data/chain_compacted/txhashset/output/pmmr_leaf.bin b/chain/tests/test_data/chain_compacted/txhashset/output/pmmr_leaf.bin new file mode 100644 index 0000000000..c8bfd9c9c8 Binary files /dev/null and b/chain/tests/test_data/chain_compacted/txhashset/output/pmmr_leaf.bin differ diff --git a/chain/tests/test_data/chain_compacted/txhashset/output/pmmr_prun.bin b/chain/tests/test_data/chain_compacted/txhashset/output/pmmr_prun.bin new file mode 100644 index 0000000000..4324337fb4 Binary files /dev/null and b/chain/tests/test_data/chain_compacted/txhashset/output/pmmr_prun.bin differ diff --git a/chain/tests/test_data/chain_compacted/txhashset/rangeproof/pmmr_data.bin b/chain/tests/test_data/chain_compacted/txhashset/rangeproof/pmmr_data.bin new file mode 100644 index 0000000000..7a5f95c7f8 Binary files /dev/null and b/chain/tests/test_data/chain_compacted/txhashset/rangeproof/pmmr_data.bin differ diff --git a/chain/tests/test_data/chain_compacted/txhashset/rangeproof/pmmr_hash.bin b/chain/tests/test_data/chain_compacted/txhashset/rangeproof/pmmr_hash.bin new file mode 100644 index 0000000000..75f2933581 Binary files /dev/null and b/chain/tests/test_data/chain_compacted/txhashset/rangeproof/pmmr_hash.bin differ diff --git a/chain/tests/test_data/chain_compacted/txhashset/rangeproof/pmmr_leaf.bin b/chain/tests/test_data/chain_compacted/txhashset/rangeproof/pmmr_leaf.bin new file mode 100644 index 0000000000..c8bfd9c9c8 Binary files /dev/null and b/chain/tests/test_data/chain_compacted/txhashset/rangeproof/pmmr_leaf.bin differ diff --git a/chain/tests/test_data/chain_compacted/txhashset/rangeproof/pmmr_prun.bin b/chain/tests/test_data/chain_compacted/txhashset/rangeproof/pmmr_prun.bin new file mode 100644 index 0000000000..4324337fb4 Binary files /dev/null and b/chain/tests/test_data/chain_compacted/txhashset/rangeproof/pmmr_prun.bin differ diff --git a/chain/tests/test_data/chain_raw/header/header_head/pmmr_data.bin b/chain/tests/test_data/chain_raw/header/header_head/pmmr_data.bin new file mode 100644 index 0000000000..fc4b2f52e3 Binary files /dev/null and b/chain/tests/test_data/chain_raw/header/header_head/pmmr_data.bin differ diff --git a/chain/tests/test_data/chain_raw/header/header_head/pmmr_hash.bin b/chain/tests/test_data/chain_raw/header/header_head/pmmr_hash.bin new file mode 100644 index 0000000000..741ecfb1ec Binary files /dev/null and b/chain/tests/test_data/chain_raw/header/header_head/pmmr_hash.bin differ diff --git a/chain/tests/test_data/chain_raw/lmdb/data.mdb 
b/chain/tests/test_data/chain_raw/lmdb/data.mdb new file mode 100644 index 0000000000..e69de29bb2 diff --git a/chain/tests/test_data/chain_raw/lmdb/lock.mdb b/chain/tests/test_data/chain_raw/lmdb/lock.mdb new file mode 100644 index 0000000000..e69de29bb2 diff --git a/chain/tests/test_data/chain_raw/txhashset/kernel/pmmr_data.bin b/chain/tests/test_data/chain_raw/txhashset/kernel/pmmr_data.bin new file mode 100644 index 0000000000..4e24d88ac6 Binary files /dev/null and b/chain/tests/test_data/chain_raw/txhashset/kernel/pmmr_data.bin differ diff --git a/chain/tests/test_data/chain_raw/txhashset/kernel/pmmr_hash.bin b/chain/tests/test_data/chain_raw/txhashset/kernel/pmmr_hash.bin new file mode 100644 index 0000000000..6202e40e98 Binary files /dev/null and b/chain/tests/test_data/chain_raw/txhashset/kernel/pmmr_hash.bin differ diff --git a/chain/tests/test_data/chain_raw/txhashset/kernel/pmmr_size.bin b/chain/tests/test_data/chain_raw/txhashset/kernel/pmmr_size.bin new file mode 100644 index 0000000000..bba2b62391 Binary files /dev/null and b/chain/tests/test_data/chain_raw/txhashset/kernel/pmmr_size.bin differ diff --git a/chain/tests/test_data/chain_raw/txhashset/output/pmmr_data.bin b/chain/tests/test_data/chain_raw/txhashset/output/pmmr_data.bin new file mode 100644 index 0000000000..eb7aaec6af Binary files /dev/null and b/chain/tests/test_data/chain_raw/txhashset/output/pmmr_data.bin differ diff --git a/chain/tests/test_data/chain_raw/txhashset/output/pmmr_hash.bin b/chain/tests/test_data/chain_raw/txhashset/output/pmmr_hash.bin new file mode 100644 index 0000000000..cadd9a2d37 Binary files /dev/null and b/chain/tests/test_data/chain_raw/txhashset/output/pmmr_hash.bin differ diff --git a/chain/tests/test_data/chain_raw/txhashset/output/pmmr_leaf.bin b/chain/tests/test_data/chain_raw/txhashset/output/pmmr_leaf.bin new file mode 100644 index 0000000000..c8bfd9c9c8 Binary files /dev/null and b/chain/tests/test_data/chain_raw/txhashset/output/pmmr_leaf.bin differ diff --git a/chain/tests/test_data/chain_raw/txhashset/rangeproof/pmmr_data.bin b/chain/tests/test_data/chain_raw/txhashset/rangeproof/pmmr_data.bin new file mode 100644 index 0000000000..7a5f95c7f8 Binary files /dev/null and b/chain/tests/test_data/chain_raw/txhashset/rangeproof/pmmr_data.bin differ diff --git a/chain/tests/test_data/chain_raw/txhashset/rangeproof/pmmr_hash.bin b/chain/tests/test_data/chain_raw/txhashset/rangeproof/pmmr_hash.bin new file mode 100644 index 0000000000..75f2933581 Binary files /dev/null and b/chain/tests/test_data/chain_raw/txhashset/rangeproof/pmmr_hash.bin differ diff --git a/chain/tests/test_data/chain_raw/txhashset/rangeproof/pmmr_leaf.bin b/chain/tests/test_data/chain_raw/txhashset/rangeproof/pmmr_leaf.bin new file mode 100644 index 0000000000..c8bfd9c9c8 Binary files /dev/null and b/chain/tests/test_data/chain_raw/txhashset/rangeproof/pmmr_leaf.bin differ diff --git a/chain/tests/test_get_kernel_height.rs b/chain/tests/test_get_kernel_height.rs index 4112c45db9..9a9e976cdc 100644 --- a/chain/tests/test_get_kernel_height.rs +++ b/chain/tests/test_get_kernel_height.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
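Returning to the coinbase maturity test a few hunks up: the rule it exercises reduces to a depth comparison between the coinbase's height and the spending height. A plain-Rust sketch with a hypothetical constant (the real code consults the consensus maturity via global configuration, not a local const):

```rust
// Hypothetical maturity depth; illustrative only.
const COINBASE_MATURITY: u64 = 1_440;

// A coinbase output may only be spent once enough blocks sit on top of it.
fn is_coinbase_spendable(coinbase_height: u64, spend_height: u64) -> bool {
    spend_height.saturating_sub(coinbase_height) >= COINBASE_MATURITY
}

fn main() {
    // Spending right away fails, as in the ImmatureCoinbase branches above.
    assert!(!is_coinbase_spendable(5, 6));
    // After enough blocks have been mined on top, the spend becomes valid.
    assert!(is_coinbase_spendable(5, 5 + COINBASE_MATURITY));
}
```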
diff --git a/chain/tests/test_header_perf.rs b/chain/tests/test_header_perf.rs new file mode 100644 index 0000000000..f948606b97 --- /dev/null +++ b/chain/tests/test_header_perf.rs @@ -0,0 +1,119 @@ +// Copyright 2021 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use grin_chain as chain; +use grin_core as core; +use grin_util as util; + +#[macro_use] +extern crate log; + +use std::sync::Arc; + +use crate::chain::types::{NoopAdapter, Options}; +use crate::core::core::hash::Hashed; +use crate::core::{genesis, global, pow}; + +use self::chain_test_helper::clean_output_dir; + +mod chain_test_helper; + +fn test_header_perf_impl(is_test_chain: bool, src_root_dir: &str, dest_root_dir: &str) { + global::set_local_chain_type(global::ChainTypes::Mainnet); + let mut genesis = genesis::genesis_main(); + + if is_test_chain { + global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + genesis = pow::mine_genesis_block().unwrap(); + } + + { + debug!("Reading Chain, genesis block: {}", genesis.hash()); + let dummy_adapter = Arc::new(NoopAdapter {}); + + // The original chain we're reading from + let src_chain = Arc::new( + chain::Chain::init( + src_root_dir.into(), + dummy_adapter.clone(), + genesis.clone(), + pow::verify_size, + false, + ) + .unwrap(), + ); + + // And the output chain we're writing to + let dest_chain = Arc::new( + chain::Chain::init( + dest_root_dir.into(), + dummy_adapter, + genesis.clone(), + pow::verify_size, + false, + ) + .unwrap(), + ); + + let sh = src_chain.get_header_by_height(0).unwrap(); + debug!("Source Genesis - {}", sh.hash()); + + let dh = dest_chain.get_header_by_height(0).unwrap(); + debug!("Destination Genesis - {}", dh.hash()); + + let horizon_header = src_chain.txhashset_archive_header().unwrap(); + + debug!("Horizon header: {:?}", horizon_header); + + // Copy the headers from source to output in chunks + let dest_sync_head = dest_chain.header_head().unwrap(); + let copy_chunk_size = 1000; + let mut copied_header_index = 1; + let mut src_headers = vec![]; + while copied_header_index <= 100000 { + let h = src_chain.get_header_by_height(copied_header_index).unwrap(); + src_headers.push(h); + copied_header_index += 1; + if copied_header_index % copy_chunk_size == 0 { + debug!( + "Copying headers to {} of {}", + copied_header_index, horizon_header.height + ); + dest_chain + .sync_block_headers(&src_headers, dest_sync_head, Options::NONE) + .unwrap(); + src_headers = vec![]; + } + } + if !src_headers.is_empty() { + dest_chain + .sync_block_headers(&src_headers, dest_sync_head, Options::NONE) + .unwrap(); + } + } +} + +#[test] +#[ignore] +// Ignored during CI, but use this to run this test on a real instance of a chain pointed where you like +fn test_header_perf() { + util::init_test_logger(); + // if testing against a real chain, insert location here + // NOTE: Modify to point at your own paths + let src_root_dir = format!("/Users/yeastplume/Projects/grin_project/server/chain_data"); + let dest_root_dir = 
format!("/Users/yeastplume/Projects/grin_project/server/.chain_data_copy"); + clean_output_dir(&dest_root_dir); + test_header_perf_impl(false, &src_root_dir, &dest_root_dir); + clean_output_dir(&dest_root_dir); +} diff --git a/chain/tests/test_header_weight_validation.rs b/chain/tests/test_header_weight_validation.rs index 25e47412c7..113103bfaa 100644 --- a/chain/tests/test_header_weight_validation.rs +++ b/chain/tests/test_header_weight_validation.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ use grin_keychain as keychain; mod chain_test_helper; use self::chain_test_helper::{clean_output_dir, mine_chain}; -use crate::chain::{Chain, ErrorKind, Options}; +use crate::chain::{Chain, Error, Options}; use crate::core::{ consensus, core::{block, Block}, @@ -36,8 +36,15 @@ fn build_block(chain: &Chain) -> Block { let prev = chain.head_header().unwrap(); let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap()); - let reward = - reward::output(&keychain, &ProofBuilder::new(&keychain), &pk, 0, false, 1).unwrap(); + let reward = reward::output( + &keychain, + &ProofBuilder::new(&keychain), + &pk, + 0, + false, + prev.height + 1, + ) + .unwrap(); let mut block = Block::new(&prev, &[], next_header_info.clone().difficulty, reward).unwrap(); block.header.timestamp = prev.timestamp + Duration::seconds(60); @@ -72,12 +79,10 @@ fn test_header_weight_validation() { // Note: We will validate this even if just processing the header. header.output_mmr_size = 1_000; - let res = chain - .process_block_header(&header, Options::NONE) - .map_err(|e| e.kind()); + let res = chain.process_block_header(&header, Options::NONE); // Weight validation is done via transaction body and results in a slightly counter-intuitive tx error. - assert_eq!(res, Err(ErrorKind::Block(block::Error::TooHeavy))); + assert_eq!(res, Err(Error::Block(block::Error::TooHeavy))); clean_output_dir(chain_dir); } diff --git a/chain/tests/test_pibd_copy.rs b/chain/tests/test_pibd_copy.rs new file mode 100644 index 0000000000..eebb4cd390 --- /dev/null +++ b/chain/tests/test_pibd_copy.rs @@ -0,0 +1,350 @@ +// Copyright 2021 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use grin_chain as chain; +use grin_core as core; +use grin_util as util; + +#[macro_use] +extern crate log; + +use std::path::Path; +use std::sync::Arc; +use std::{fs, io}; + +use crate::chain::txhashset::BitmapChunk; +use crate::chain::types::{NoopAdapter, Options}; +use crate::core::core::{ + hash::{Hash, Hashed}, + pmmr::segment::{Segment, SegmentIdentifier, SegmentType}, + Block, OutputIdentifier, TxKernel, +}; +use crate::core::{genesis, global, pow}; +use crate::util::secp::pedersen::RangeProof; + +use self::chain_test_helper::clean_output_dir; + +mod chain_test_helper; + +fn copy_dir_all(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> io::Result<()> { + fs::create_dir_all(&dst)?; + for entry in fs::read_dir(src)? { + let entry = entry?; + let ty = entry.file_type()?; + if ty.is_dir() { + copy_dir_all(entry.path(), dst.as_ref().join(entry.file_name()))?; + } else { + fs::copy(entry.path(), dst.as_ref().join(entry.file_name()))?; + } + } + Ok(()) +} + +// Canned segmenter responder, which will simulate feeding back segments as requested +// by the desegmenter +struct SegmenterResponder { + chain: Arc<chain::Chain>, +} + +impl SegmenterResponder { + pub fn new(chain_src_dir: &str, genesis: Block) -> Self { + let dummy_adapter = Arc::new(NoopAdapter {}); + debug!( + "Reading SegmenterResponder chain, genesis block: {}", + genesis.hash() + ); + + // The original chain we're reading from + let res = SegmenterResponder { + chain: Arc::new( + chain::Chain::init( + chain_src_dir.into(), + dummy_adapter.clone(), + genesis, + pow::verify_size, + false, + ) + .unwrap(), + ), + }; + let sh = res.chain.get_header_by_height(0).unwrap(); + debug!("Source Genesis - {}", sh.hash()); + res + } + + pub fn chain(&self) -> Arc<chain::Chain> { + self.chain.clone() + } + + pub fn get_bitmap_root_hash(&self) -> Hash { + self.chain.segmenter().unwrap().bitmap_root().unwrap() + } + + pub fn get_bitmap_segment(&self, seg_id: SegmentIdentifier) -> Segment<BitmapChunk> { + let segmenter = self.chain.segmenter().unwrap(); + segmenter.bitmap_segment(seg_id).unwrap() + } + + pub fn get_output_segment(&self, seg_id: SegmentIdentifier) -> Segment<OutputIdentifier> { + let segmenter = self.chain.segmenter().unwrap(); + segmenter.output_segment(seg_id).unwrap() + } + + pub fn get_rangeproof_segment(&self, seg_id: SegmentIdentifier) -> Segment<RangeProof> { + let segmenter = self.chain.segmenter().unwrap(); + segmenter.rangeproof_segment(seg_id).unwrap() + } + + pub fn get_kernel_segment(&self, seg_id: SegmentIdentifier) -> Segment<TxKernel> { + let segmenter = self.chain.segmenter().unwrap(); + segmenter.kernel_segment(seg_id).unwrap() + } +} + +// Canned segmenter 'peer', building up its local chain from requested PIBD segments +struct DesegmenterRequestor { + chain: Arc<chain::Chain>, + responder: Arc<SegmenterResponder>, +} + +impl DesegmenterRequestor { + pub fn new(chain_src_dir: &str, genesis: Block, responder: Arc<SegmenterResponder>) -> Self { + let dummy_adapter = Arc::new(NoopAdapter {}); + debug!( + "Reading DesegmenterRequestor chain, genesis block: {}", + genesis.hash() + ); + + // The destination chain we're building up from segments + let res = DesegmenterRequestor { + chain: Arc::new( + chain::Chain::init( + chain_src_dir.into(), + dummy_adapter.clone(), + genesis, + pow::verify_size, + false, + ) + .unwrap(), + ), + responder, + }; + let sh = res.chain.get_header_by_height(0).unwrap(); + debug!("Dest Genesis - {}", sh.hash()); + res + } + + /// Copy headers, hopefully bringing the requestor to a state where PIBD is the next step + pub fn copy_headers_from_responder(&mut self) { + let src_chain = self.responder.chain(); + let tip =
src_chain.header_head().unwrap(); + let dest_sync_head = self.chain.header_head().unwrap(); + let copy_chunk_size = 1000; + let mut copied_header_index = 1; + let mut src_headers = vec![]; + while copied_header_index <= tip.height { + let h = src_chain.get_header_by_height(copied_header_index).unwrap(); + src_headers.push(h); + copied_header_index += 1; + if copied_header_index % copy_chunk_size == 0 { + debug!( + "Copying headers to {} of {}", + copied_header_index, tip.height + ); + self.chain + .sync_block_headers(&src_headers, dest_sync_head, Options::SKIP_POW) + .unwrap(); + src_headers = vec![]; + } + } + if !src_headers.is_empty() { + self.chain + .sync_block_headers(&src_headers, dest_sync_head, Options::NONE) + .unwrap(); + } + } + + pub fn init_desegmenter(&mut self, bitmap_root_hash: Hash) { + let archive_header = self.chain.txhashset_archive_header_header_only().unwrap(); + self.chain + .create_desegmenter(&archive_header, bitmap_root_hash) + .unwrap(); + } + + // Emulate `continue_pibd` function, which would be called from state sync + // return whether is complete + pub fn continue_pibd(&mut self, bitmap_root_hash: Hash) -> bool { + let archive_header = self.chain.txhashset_archive_header_header_only().unwrap(); + let desegmenter = self.chain.get_desegmenter(&archive_header); + + // Apply segments... TODO: figure out how this should be called, might + // need to be a separate thread. + if let Some(mut de) = desegmenter.try_write() { + if let Some(d) = de.as_mut() { + d.apply_next_segments().unwrap(); + } + } + + let mut next_segment_ids = vec![]; + let mut is_complete = false; + if let Some(d) = desegmenter.write().as_mut() { + // Figure out the next segments we need + // (12 is divisible by 3, to try and evenly spread the requests among the 3 + // main pmmrs. 
Bitmaps segments will always be requested first) + next_segment_ids = d.next_desired_segments(12).unwrap(); + is_complete = d.is_complete() + } + + debug!("Next segment IDS: {:?}", next_segment_ids); + + // For each segment, pick a desirable peer and send message + for seg_id in next_segment_ids.iter() { + // Perform request and response + match seg_id.segment_type { + SegmentType::Bitmap => { + let seg = self.responder.get_bitmap_segment(seg_id.identifier.clone()); + if let Some(d) = desegmenter.write().as_mut() { + d.add_bitmap_segment(seg, bitmap_root_hash).unwrap(); + } + } + SegmentType::Output => { + let seg = self.responder.get_output_segment(seg_id.identifier.clone()); + if let Some(d) = desegmenter.write().as_mut() { + d.add_output_segment(seg, bitmap_root_hash).unwrap(); + } + } + SegmentType::RangeProof => { + let seg = self + .responder + .get_rangeproof_segment(seg_id.identifier.clone()); + if let Some(d) = desegmenter.write().as_mut() { + d.add_rangeproof_segment(seg, bitmap_root_hash).unwrap(); + } + } + SegmentType::Kernel => { + let seg = self.responder.get_kernel_segment(seg_id.identifier.clone()); + if let Some(d) = desegmenter.write().as_mut() { + d.add_kernel_segment(seg, bitmap_root_hash).unwrap(); + } + } + }; + } + is_complete + } + + pub fn check_roots(&self) { + let roots = self.chain.txhashset().read().roots().unwrap(); + let archive_header = self.chain.txhashset_archive_header_header_only().unwrap(); + debug!("Archive Header is {:?}", archive_header); + debug!("TXHashset output root is {:?}", roots); + debug!("TXHashset merged output root is {:?}", roots.output_root); + assert_eq!(archive_header.range_proof_root, roots.rproof_root); + assert_eq!(archive_header.kernel_root, roots.kernel_root); + assert_eq!(archive_header.output_root, roots.output_root); + } +} +fn test_pibd_copy_impl( + is_test_chain: bool, + src_root_dir: &str, + dest_root_dir: &str, + dest_template_dir: Option<&str>, +) { + global::set_local_chain_type(global::ChainTypes::Floonet); + let mut genesis = genesis::genesis_floo(); + + if is_test_chain { + global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + genesis = pow::mine_genesis_block().unwrap(); + } + + // Copy a starting point over for the destination, e.g. 
a copy of chain + // with all headers pre-applied + if let Some(td) = dest_template_dir { + debug!( + "Copying template dir for destination from {} to {}", + td, dest_root_dir + ); + copy_dir_all(td, dest_root_dir).unwrap(); + } + + let src_responder = Arc::new(SegmenterResponder::new(src_root_dir, genesis.clone())); + let bitmap_root_hash = src_responder.get_bitmap_root_hash(); + let mut dest_requestor = + DesegmenterRequestor::new(dest_root_dir, genesis.clone(), src_responder); + + // No template provided so copy headers from source + if dest_template_dir.is_none() { + dest_requestor.copy_headers_from_responder(); + if !is_test_chain { + return; + } + } + + dest_requestor.init_desegmenter(bitmap_root_hash); + + // Perform until desegmenter reports it's done + while !dest_requestor.continue_pibd(bitmap_root_hash) {} + + dest_requestor.check_roots(); +} + +#[test] +#[ignore] +fn test_pibd_copy_sample() { + util::init_test_logger(); + // Note there is now a 'test' in grin_wallet_controller/build_chain + // that can be manually tweaked to create a + // small test chain with actual transaction data + + // Test on uncompacted and non-compacted chains + let src_root_dir = format!("./tests/test_data/chain_raw"); + let dest_root_dir = format!("./tests/test_output/.segment_copy"); + clean_output_dir(&dest_root_dir); + test_pibd_copy_impl(true, &src_root_dir, &dest_root_dir, None); + let src_root_dir = format!("./tests/test_data/chain_compacted"); + clean_output_dir(&dest_root_dir); + test_pibd_copy_impl(true, &src_root_dir, &dest_root_dir, None); + clean_output_dir(&dest_root_dir); +} + +#[test] +#[ignore] +// Note this test is intended to be run manually, as testing the copy of an +// entire live chain is beyond the capability of current CI +// As above, but run on a real instance of a chain pointed where you like +fn test_pibd_copy_real() { + util::init_test_logger(); + // If set, just copy headers from source to target template dir and exit + // Used to set up a chain state simulating the start of PIBD to continue manual testing + let copy_headers_to_template = false; + + // if testing against a real chain, insert location here + let src_root_dir = format!("/Users/bay/.mwc/_floo/chain_data"); + let dest_template_dir = format!("/Users/bay/.mwc/_floo2/chain_data"); + let dest_root_dir = format!("/Users/bay/.mwc/_floo2/chain_data"); + if copy_headers_to_template { + clean_output_dir(&dest_template_dir); + test_pibd_copy_impl(false, &src_root_dir, &dest_template_dir, None); + } else { + clean_output_dir(&dest_root_dir); + test_pibd_copy_impl( + false, + &src_root_dir, + &dest_root_dir, + Some(&dest_template_dir), + ); + } + + //clean_output_dir(&dest_root_dir); +} diff --git a/chain/tests/test_pibd_validation.rs b/chain/tests/test_pibd_validation.rs new file mode 100644 index 0000000000..c713677b2f --- /dev/null +++ b/chain/tests/test_pibd_validation.rs @@ -0,0 +1,229 @@ +// Copyright 2021 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
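The requestor/responder round-trip above follows one loop: ask the desegmenter which segments it still wants, fetch each one by type from the responder, and hand it back together with the pinned bitmap root hash for validation, repeating until the desegmenter reports completion. A stripped-down sketch (the types are stand-ins, not the chain API):

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum SegmentType {
    Bitmap,
    Output,
    RangeProof,
    Kernel,
}

struct Desegmenter {
    // Hypothetical queue of outstanding segment types.
    wanted: Vec<SegmentType>,
}

impl Desegmenter {
    // Hand back up to `max` outstanding requests, like next_desired_segments.
    fn next_desired_segments(&mut self, max: usize) -> Vec<SegmentType> {
        let n = max.min(self.wanted.len());
        self.wanted.drain(..n).collect()
    }
    fn is_complete(&self) -> bool {
        self.wanted.is_empty()
    }
}

fn main() {
    let mut d = Desegmenter {
        // Bitmap segments come first so later output segments can be
        // validated against the reconstructed output bitmap.
        wanted: vec![
            SegmentType::Bitmap,
            SegmentType::Output,
            SegmentType::RangeProof,
            SegmentType::Kernel,
        ],
    };
    while !d.is_complete() {
        for seg in d.next_desired_segments(12) {
            // Real code: fetch from the responder, then add_*_segment(seg, root).
            println!("request + apply {:?}", seg);
        }
    }
    assert!(d.is_complete());
}
```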
+ +use grin_chain as chain; +use grin_core as core; +use grin_util as util; + +use std::sync::Arc; + +use crate::chain::txhashset::BitmapAccumulator; +use crate::chain::types::NoopAdapter; +use crate::core::core::pmmr; +use crate::core::core::{hash::Hashed, pmmr::segment::SegmentIdentifier}; +use crate::core::{genesis, global, pow}; + +use croaring::Bitmap; + +mod chain_test_helper; + +fn test_pibd_chain_validation_impl(is_test_chain: bool, src_root_dir: &str) { + global::set_local_chain_type(global::ChainTypes::Mainnet); + let mut genesis = genesis::genesis_main(); + // Height at which to read kernel segments (lower than thresholds defined in spec - for testing) + let mut target_segment_height = 11; + + if is_test_chain { + global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + genesis = pow::mine_genesis_block().unwrap(); + target_segment_height = 3; + } + + { + println!("Reading Chain, genesis block: {}", genesis.hash()); + let dummy_adapter = Arc::new(NoopAdapter {}); + + // The original chain we're reading from + let src_chain = Arc::new( + chain::Chain::init( + src_root_dir.into(), + dummy_adapter.clone(), + genesis.clone(), + pow::verify_size, + false, + ) + .unwrap(), + ); + + // For test compaction purposes + /*src_chain.compact().unwrap(); + src_chain + .validate(true) + .expect("Source chain validation failed, stop");*/ + + let sh = src_chain.get_header_by_height(0).unwrap(); + println!("Source Genesis - {}", sh.hash()); + + let horizon_header = src_chain.txhashset_archive_header().unwrap(); + + println!("Horizon header: {:?}", horizon_header); + + // Copy the header from source to output + // Not necessary for this test, we're just validating the source + /*for h in 1..=horizon_height { + let h = src_chain.get_header_by_height(h).unwrap(); + dest_chain.process_block_header(&h, options).unwrap(); + }*/ + + // Init segmenter, (note this still has to be lazy init somewhere on a peer) + // This is going to use the same block as horizon_header + let segmenter = src_chain.segmenter().unwrap(); + + let bitmap_root = segmenter.bitmap_root().unwrap(); + println!( + "Bitmap segmenter reports output bitmap root hash is {:?}", + bitmap_root + ); + + // BITMAP - Read + Validate, Also recreate bitmap accumulator for target tx hash set + // Predict number of leaves (chunks) in the bitmap MMR from the number of outputs + let bitmap_mmr_num_leaves = + (pmmr::n_leaves(horizon_header.output_mmr_size) as f64 / 1024f64).ceil() as u64; + println!("BITMAP PMMR NUM_LEAVES: {}", bitmap_mmr_num_leaves); + + // And total size of the bitmap PMMR + let bitmap_pmmr_size = pmmr::peaks(bitmap_mmr_num_leaves) + .last() + .unwrap_or(&pmmr::insertion_to_pmmr_index(bitmap_mmr_num_leaves)) + .clone(); + println!("BITMAP PMMR SIZE: {}", bitmap_pmmr_size); + println!( + "Bitmap Segments required: {}", + SegmentIdentifier::count_segments_required(bitmap_pmmr_size, target_segment_height) + ); + // TODO: This can probably be derived from the PMMR we'll eventually be building + // (check if total size is equal to total size at horizon header) + let identifier_iter = + SegmentIdentifier::traversal_iter(bitmap_pmmr_size, target_segment_height); + + let mut bitmap_accumulator = BitmapAccumulator::new(); + // Raw bitmap for validation + let mut bitmap = Bitmap::new(); + let mut chunk_count = 0; + + for sid in identifier_iter { + println!("Getting bitmap segment with Segment Identifier {:?}", sid); + let bitmap_segment = segmenter.bitmap_segment(sid).unwrap(); + // Validate bitmap segment with provided output 
hash + if let Err(e) = bitmap_segment.validate( + bitmap_pmmr_size, // Last MMR pos at the height being validated, in this case of the bitmap root + None, + bitmap_root, + ) { + panic!("Unable to validate bitmap_root: {}", e); + } + + let (_sid, _hash_pos, _hashes, _leaf_pos, leaf_data, _proof) = bitmap_segment.parts(); + + // Add to raw bitmap to use in further validation + for chunk in leaf_data.iter() { + bitmap.add_many(&chunk.set_iter(chunk_count * 1024).collect::<Vec<u32>>()); + chunk_count += 1; + } + + // and append to bitmap accumulator + for chunk in leaf_data.into_iter() { + bitmap_accumulator.append_chunk(chunk).unwrap(); + } + } + + println!("Accumulator Root: {}", bitmap_accumulator.root()); + + // OUTPUTS - Read + Validate + let identifier_iter = SegmentIdentifier::traversal_iter( + horizon_header.output_mmr_size, + target_segment_height, + ); + + for sid in identifier_iter { + println!("Getting output segment with Segment Identifier {:?}", sid); + let output_segment = segmenter.output_segment(sid).unwrap(); + // Validate Output + if let Err(e) = output_segment.validate( + horizon_header.output_mmr_size, // Last MMR pos at the height being validated + Some(&bitmap), + horizon_header.output_root, // Output root we're checking for + ) { + panic!("Unable to validate output segment root: {}", e); + } + } + + // PROOFS - Read + Validate + let identifier_iter = SegmentIdentifier::traversal_iter( + horizon_header.output_mmr_size, + target_segment_height, + ); + + for sid in identifier_iter { + println!( + "Getting rangeproof segment with Segment Identifier {:?}", + sid + ); + let rangeproof_segment = segmenter.rangeproof_segment(sid).unwrap(); + // Validate rangeproof segment (requires the output bitmap) + if let Err(e) = rangeproof_segment.validate( + horizon_header.output_mmr_size, // Last MMR pos at the height being validated + Some(&bitmap), + horizon_header.range_proof_root, // Rangeproof root we're checking for + ) { + panic!("Unable to validate rangeproof segment root: {}", e); + } + } + + // KERNELS - Read + Validate + let identifier_iter = SegmentIdentifier::traversal_iter( + horizon_header.kernel_mmr_size, + target_segment_height, + ); + + for sid in identifier_iter { + println!("Getting kernel segment with Segment Identifier {:?}", sid); + let kernel_segment = segmenter.kernel_segment(sid).unwrap(); + // Validate Kernel segment (which does not require a bitmap) + if let Err(e) = kernel_segment.validate( + horizon_header.kernel_mmr_size, + None, + horizon_header.kernel_root, + ) { + panic!("Unable to validate kernel_segment root: {}", e); + } + } + } +} + +#[test] +// TODO: Fix before merge into master +#[ignore] +fn test_pibd_chain_validation_sample() { + util::init_test_logger(); + // Note there is now a 'test' in grin_wallet_controller/build_chain + // that can be manually tweaked to create a + // small test chain with actual transaction data + + // Test on compacted and non-compacted chains + let src_root_dir = format!("./tests/test_data/chain_raw"); + test_pibd_chain_validation_impl(true, &src_root_dir); + let src_root_dir = format!("./tests/test_data/chain_compacted"); + test_pibd_chain_validation_impl(true, &src_root_dir); +} + +#[test] +#[ignore] +// As above, but run on a real instance of a chain pointed where you like +fn test_pibd_chain_validation_real() { + util::init_test_logger(); + // if testing against a real chain, insert location here + let src_root_dir = format!("/Users/bay/.mwc/_floo/chain_data"); + test_pibd_chain_validation_impl(false, &src_root_dir); +} diff
--git a/chain/tests/test_txhashset.rs b/chain/tests/test_txhashset.rs index 1132984e5c..e8d3141578 100644 --- a/chain/tests/test_txhashset.rs +++ b/chain/tests/test_txhashset.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/chain/tests/test_txhashset_archive.rs b/chain/tests/test_txhashset_archive.rs index 0b1336317c..67e2522038 100644 --- a/chain/tests/test_txhashset_archive.rs +++ b/chain/tests/test_txhashset_archive.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/config/Cargo.toml b/config/Cargo.toml index 215d180ca8..8bce031e91 100644 --- a/config/Cargo.toml +++ b/config/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grin_config" -version = "4.4.2" +version = "5.3.2" authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"] description = "Configuration for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format." license = "Apache-2.0" @@ -15,13 +15,12 @@ serde = "1" serde_derive = "1" toml = "0.5" dirs = "2.0" -failure = "0.1" -failure_derive = "0.1" +thiserror = "1" -grin_core = { path = "../core", version = "4.4.2" } -grin_servers = { path = "../servers", version = "4.4.2" } -grin_p2p = { path = "../p2p", version = "4.4.2" } -grin_util = { path = "../util", version = "4.4.2" } +grin_core = { path = "../core", version = "5.3.2" } +grin_servers = { path = "../servers", version = "5.3.2" } +grin_p2p = { path = "../p2p", version = "5.3.2" } +grin_util = { path = "../util", version = "5.3.2" } [dev-dependencies] pretty_assertions = "0.6.1" diff --git a/config/src/comments.rs b/config/src/comments.rs index 7dce1e0be6..4e118e0553 100644 --- a/config/src/comments.rs +++ b/config/src/comments.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ use std::collections::HashMap; fn comments() -> HashMap<String, String> { let mut retval = HashMap::new(); retval.insert( - "[server]".to_string(), + "config_file_version".to_string(), " # Generated Server Configuration File for MWC # @@ -31,7 +31,13 @@ fn comments() -> HashMap<String, String> { # -[user home]/.mwc # -######################################### +" + .to_string(), + ); + + retval.insert( + "[server]".to_string(), + "######################################### ### SERVER CONFIGURATION ### ######################################### @@ -317,7 +323,7 @@ fn comments() -> HashMap<String, String> { ); retval.insert( - "[server.p2p_config.capabilities]".to_string(), + "[server.pool_config]".to_string(), "#If the seeding type is List, the list of peers to connect to can #be specified as follows: #seeds = [\"192.168.0.1:3414\",\"192.168.0.2:3414\"] @@ -346,19 +352,9 @@ fn comments() -> HashMap<String, String> { #amount of incoming connections temporarily allowed to exceed peer_max_inbound_count #peer_listener_buffer_count = 8 -# 15 = Bit flags for FULL_NODE -#This structure needs to be changed internally, to make it more configurable - # A preferred dandelion_peer, mainly used for testing dandelion # dandelion_peer = \"10.0.0.1:13144\" -" - .to_string(), - ); - - retval.insert( - "[server.pool_config]".to_string(), - " ######################################### ### MEMPOOL CONFIGURATION ### ######################################### diff --git a/config/src/config.rs b/config/src/config.rs index f4ea031a3c..d926a9564f 100644 --- a/config/src/config.rs +++ b/config/src/config.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,16 +14,13 @@ //!
Configuration file management -use dirs; use rand::distributions::{Alphanumeric, Distribution}; use rand::thread_rng; use std::env; use std::fs::{self, File}; use std::io::prelude::*; use std::io::BufReader; -use std::io::Read; use std::path::PathBuf; -use toml; use crate::comments::insert_comments; use crate::core::global; @@ -143,6 +140,7 @@ pub fn initial_setup_server(chain_type: &global::ChainTypes) -> Result ConfigMembers { ConfigMembers { + config_file_version: Some(1), server: ServerConfig::default(), logging: Some(LoggingConfig::default()), } @@ -226,10 +224,14 @@ impl GlobalConfig { /// Read config fn read_config(mut self) -> Result<GlobalConfig, ConfigError> { - let mut file = File::open(self.config_file_path.as_mut().unwrap())?; - let mut contents = String::new(); - file.read_to_string(&mut contents)?; - let fixed = GlobalConfig::fix_warning_level(contents); + let config_file_path = self.config_file_path.as_ref().unwrap(); + let contents = fs::read_to_string(config_file_path)?; + let migrated = GlobalConfig::migrate_config_file_version_none_to_1(contents.clone()); + if contents != migrated { + fs::write(config_file_path, &migrated)?; + } + + let fixed = GlobalConfig::fix_warning_level(migrated); let decoded: Result<GlobalConfig, toml::de::Error> = toml::from_str(&fixed); match decoded { Ok(gc) => { @@ -251,10 +253,17 @@ impl GlobalConfig { let mut chain_path = grin_home.clone(); chain_path.push(GRIN_CHAIN_DIR); self.members.as_mut().unwrap().server.db_root = chain_path.to_str().unwrap().to_owned(); - let mut secret_path = grin_home.clone(); - secret_path.push(API_SECRET_FILE_NAME); + let mut api_secret_path = grin_home.clone(); + api_secret_path.push(API_SECRET_FILE_NAME); self.members.as_mut().unwrap().server.api_secret_path = - Some(secret_path.to_str().unwrap().to_owned()); + Some(api_secret_path.to_str().unwrap().to_owned()); + let mut foreign_api_secret_path = grin_home.clone(); + foreign_api_secret_path.push(FOREIGN_API_SECRET_FILE_NAME); + self.members + .as_mut() + .unwrap() + .server + .foreign_api_secret_path = Some(foreign_api_secret_path.to_str().unwrap().to_owned()); let mut log_path = grin_home.clone(); log_path.push(SERVER_LOG_FILE_NAME); self.members @@ -302,6 +311,46 @@ impl GlobalConfig { Ok(()) } + /// It is a placeholder for future migrations. Please check how it is done in grin; + /// MWC doesn't have anything to migrate yet + fn migrate_config_file_version_none_to_1(config_str: String) -> String { + // Parse existing config and return unchanged if not eligible for migration + + // Nothing to migrate in MWC.
Keeping the commented-out code as an example /*let mut config: ConfigMembers = + toml::from_str(&GlobalConfig::fix_warning_level(config_str.clone())).unwrap(); + if config.config_file_version != None { + return config_str; + } + + // Apply changes both textually and structurally + + let config_str = config_str.replace("\n#########################################\n### SERVER CONFIGURATION ###", "\nconfig_file_version = 2\n\n#########################################\n### SERVER CONFIGURATION ###"); + config.config_file_version = Some(2); + + let config_str = config_str.replace( + "\naccept_fee_base = 1000000\n", + "\naccept_fee_base = 500000\n", + ); + if config.server.pool_config.accept_fee_base == 1000000 { + config.server.pool_config.accept_fee_base = 500000; + } + + let config_str = config_str.replace( + "\n#a setting to 1000000 will be overridden to 500000 to respect the fixfees RFC\n", + "\n", + ); + + // Verify equivalence + + assert_eq!( + config, + toml::from_str(&GlobalConfig::fix_warning_level(config_str.clone())).unwrap() + );*/ + + config_str + } + // For forwards compatibility old config needs `Warning` log level changed to standard log::Level `WARN` fn fix_warning_level(conf: String) -> String { conf.replace("Warning", "WARN") diff --git a/config/src/lib.rs b/config/src/lib.rs index e1ef7de8a2..936198d29e 100644 --- a/config/src/lib.rs +++ b/config/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/config/src/types.rs b/config/src/types.rs index 0b83ac43d1..d4254008b4 100644 --- a/config/src/types.rs +++ b/config/src/types.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ //! Public types for config modules -use failure::Fail; use std::io; use std::path::PathBuf; @@ -22,22 +21,22 @@ use crate::servers::ServerConfig; use crate::util::logger::LoggingConfig; /// Error type wrapping config errors.
-#[derive(Debug, Fail)] +#[derive(Debug, thiserror::Error)] pub enum ConfigError { /// Error with parsing of config file - #[fail(display = "Error parsing configuration file {}, {}", _0, _1)] + #[error("Error parsing configuration file {0}, {1}")] ParseError(String, String), /// Error with fileIO while reading config file - #[fail(display = "Node Config file {} IO error, {}", _0, _1)] + #[error("Node Config file {0} IO error, {1}")] FileIOError(String, String), /// No file found - #[fail(display = "Node Configuration file not found: {}", _0)] + #[error("Node Configuration file not found: {0}")] FileNotFoundError(String), /// Error serializing config values - #[fail(display = "Error serializing node configuration: {}", _0)] + #[error("Error serializing node configuration: {0}")] SerializationError(String), } @@ -71,6 +70,8 @@ pub struct GlobalConfig { /// want serialised or deserialised #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct ConfigMembers { + /// Config file version (None == version 1) + pub config_file_version: Option<u32>, /// Server config #[serde(default)] pub server: ServerConfig, diff --git a/core/Cargo.toml b/core/Cargo.toml index fd47fd6ffb..c3aabec948 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grin_core" -version = "4.4.2" +version = "5.3.2" authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"] description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format." license = "Apache-2.0" @@ -12,10 +12,8 @@ edition = "2018" [dependencies] blake2 = { package = "blake2-rfc", version = "0.2"} byteorder = "1" -croaring = "1.0.1" +croaring = "1.1" enum_primitive = "0.1" -failure = "0.1" -failure_derive = "0.1" lazy_static = "1" lru-cache = "0.1" num = "0.2" @@ -24,12 +22,14 @@ rand = "0.6" serde = "1" serde_derive = "1" siphasher = "0.3" +thiserror = "1" log = "0.4" chrono = { version = "0.4.11", features = ["serde"] } zeroize = { version = "1.1", features =["zeroize_derive"] } +bytes = "0.5" -keychain = { package = "grin_keychain", path = "../keychain", version = "4.4.2" } -util = { package = "grin_util", path = "../util", version = "4.4.2" } +keychain = { package = "grin_keychain", path = "../keychain", version = "5.3.2" } +util = { package = "grin_util", path = "../util", version = "5.3.2" } [dev-dependencies] serde_json = "1" diff --git a/core/fuzz/Cargo.lock b/core/fuzz/Cargo.lock index b2a18b82b9..fd99dd3a62 100644 --- a/core/fuzz/Cargo.lock +++ b/core/fuzz/Cargo.lock @@ -1,1471 +1,1413 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing.
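The read_config change in config/src/config.rs above wires a migrate-on-read step in front of TOML parsing: read the file, run it through the migration hook, and write it back only if the text actually changed. A minimal standalone sketch of that flow, assuming hypothetical names (migrate_none_to_1, read_config, and the mwc-server.toml path are simplified stand-ins, not the code in this diff):

use std::fs;

// Placeholder migration, mirroring migrate_config_file_version_none_to_1 above:
// a real one would parse the TOML, inspect config_file_version, rewrite the
// text, and assert the parsed results stay equivalent (as in the commented-out
// grin example). MWC currently has nothing to migrate, so it is the identity.
fn migrate_none_to_1(config_str: String) -> String {
    config_str
}

fn read_config(path: &str) -> std::io::Result<String> {
    let contents = fs::read_to_string(path)?;
    let migrated = migrate_none_to_1(contents.clone());
    // Persist the migrated text so the on-disk file is upgraded in place.
    if contents != migrated {
        fs::write(path, &migrated)?;
    }
    Ok(migrated)
}

fn main() -> std::io::Result<()> {
    let config = read_config("mwc-server.toml")?;
    println!("loaded {} bytes of config", config.len());
    Ok(())
}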
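The config/src/types.rs hunk above ports ConfigError from the failure crate to thiserror; positional fields move from #[fail(display = "...", _0, _1)] to {0}/{1} interpolation in #[error(...)]. A minimal sketch of the same derive pattern, using a hypothetical ExampleError rather than the real ConfigError:

use thiserror::Error;

#[derive(Debug, Error)]
pub enum ExampleError {
    // Positional fields are interpolated as {0}, {1} in the message,
    // replacing failure's #[fail(display = "...", _0, _1)] syntax.
    #[error("Error parsing configuration file {0}, {1}")]
    ParseError(String, String),
    // #[from] additionally derives From<std::io::Error>, so `?` converts
    // I/O errors into this type automatically.
    #[error("I/O error, {0}")]
    Io(#[from] std::io::Error),
}

fn main() {
    let e = ExampleError::ParseError("mwc-server.toml".into(), "unexpected key".into());
    // Display output comes from the #[error(...)] attribute.
    println!("{}", e);
}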
+version = 3 + +[[package]] +name = "addr2line" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +dependencies = [ + "gimli", +] + [[package]] -name = "adler32" -version = "1.0.3" +name = "adler" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" -version = "0.7.8" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" dependencies = [ - "memchr 2.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr", ] [[package]] name = "ansi_term" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" dependencies = [ - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi", ] -[[package]] -name = "antidote" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "arbitrary" -version = "0.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a7924531f38b1970ff630f03eb20a2fde69db5c590c93b0f3482e95dcc5fd60" [[package]] -name = "arrayref" -version = "0.3.5" +name = "arc-swap" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dabe5a181f83789739c194cbe5a897dde195078fac08568d09221fd6137a7ba8" [[package]] name = "arrayvec" version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06f59fe10306bb78facd90d28c2038ad23ffaaefa85bac43c8a434cde383334f" dependencies = [ - "nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", - "odds 0.2.26 (registry+https://github.com/rust-lang/crates.io-index)", + "nodrop", + "odds", ] [[package]] name = "arrayvec" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" dependencies = [ - "nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "nodrop", ] [[package]] name = "atty" -version = "0.2.11" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi", + "libc", + "winapi", ] [[package]] name = "autocfg" -version = "0.1.2" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" +dependencies = [ + "autocfg 1.1.0", +] + +[[package]] +name = "autocfg" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" -version = "0.3.14" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" dependencies = [ - 
"autocfg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "backtrace-sys 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-demangle 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "addr2line", + "cc", + "cfg-if 1.0.0", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", ] [[package]] -name = "backtrace-sys" -version = "0.1.28" +name = "base64" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" [[package]] -name = "base64" -version = "0.9.3" +name = "base64ct" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "safemem 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" [[package]] name = "bindgen" -version = "0.52.0" +version = "0.56.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2da379dbebc0b76ef63ca68d8fc6e71c0f13e59432e0987e508c1820e6ab5239" dependencies = [ - "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "cexpr 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "clang-sys 0.28.1 (registry+https://github.com/rust-lang/crates.io-index)", - "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazycell 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-hash 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "which 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags", + "cexpr", + "clang-sys", + "clap", + "env_logger", + "lazy_static", + "lazycell", + "log", + "peeking_take_while", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "which", ] [[package]] name = "bitflags" -version = "1.0.4" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "blake2-rfc" version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" dependencies = [ - "arrayvec 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "constant_time_eq 0.1.3 
(registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.4.12", + "constant_time_eq", ] [[package]] name = "block-buffer" -version = "0.3.3" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "generic-array", ] [[package]] -name = "build_const" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "byte-tools" -version = "0.2.0" +name = "byteorder" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] -name = "byteorder" -version = "1.3.1" +name = "bytes" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "cc" -version = "1.0.31" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" [[package]] name = "cexpr" -version = "0.3.6" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" dependencies = [ - "nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "nom", ] [[package]] name = "cfg-if" -version = "0.1.7" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.6" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" dependencies = [ - "num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "num-integer", + "num-traits 0.2.15", + "serde", + "time", + "winapi", ] [[package]] name = "clang-sys" -version = "0.28.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a050e2153c5be08febd6734e29298e844fdb0fa21aeddd63b4eb7baa106c69b" dependencies = [ - "glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "glob", + "libc", + "libloading", ] [[package]] name = "clap" -version = "2.32.0" +version = "2.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ - "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "bitflags 1.0.4 
(registry+https://github.com/rust-lang/crates.io-index)", - "strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ansi_term", + "atty", + "bitflags", + "strsim", + "textwrap", + "unicode-width", + "vec_map", ] [[package]] name = "cloudabi" version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" dependencies = [ - "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags", ] [[package]] name = "constant_time_eq" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] -name = "crc" -version = "1.8.1" +name = "cpufeatures" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" dependencies = [ - "build_const 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", ] [[package]] name = "crc32fast" -version = "1.2.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 1.0.0", ] [[package]] name = "croaring" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a00d14ad7d8cc067d7a5c93e8563791bfec3f7182361db955530db11d94ed63c" dependencies = [ - "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "croaring-sys 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder", + "croaring-sys", + "libc", ] [[package]] name = "croaring-sys" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5d6a46501bb403a61e43bc7cd19977b4f9c54efd703949b00259cc61afb5a86" dependencies = [ - "bindgen 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "bindgen", + "cc", + "libc", ] -[[package]] -name = "crossbeam" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "crypto-mac" -version = "0.6.2" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ - "constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "generic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "generic-array", + "subtle", ] [[package]] name = "digest" -version = "0.7.6" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "generic-array", ] -[[package]] -name = "dtoa" -version = 
"0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "enum_primitive" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4551092f4d519593039259a9ed8daedf0da12e5109c5280338073eaeb81180" dependencies = [ - "num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.43", ] [[package]] name = "env_logger" -version = "0.7.1" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" dependencies = [ - "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "atty", + "humantime 2.1.0", + "log", + "regex", + "termcolor", ] -[[package]] -name = "failure" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "backtrace 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "failure_derive" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.29 (registry+https://github.com/rust-lang/crates.io-index)", - "synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "flate2" -version = "1.0.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ - "crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "miniz-sys 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "miniz_oxide_c_api 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crc32fast", + "miniz_oxide", ] [[package]] name = "fnv" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fuchsia-cprng" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" [[package]] name = "generic-array" -version = "0.9.0" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" dependencies = [ - "typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)", + "typenum", + "version_check", ] +[[package]] +name = "gimli" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" + [[package]] name = "glob" version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "grin_core" -version = "3.1.0-beta.3" -dependencies = [ - "blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "croaring 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "enum_primitive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "grin_keychain 3.1.0-beta.3", - "grin_util 3.1.0-beta.3", - "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "num 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "num-bigint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", - "siphasher 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "zeroize 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", +version = "5.2.0-alpha.1" +dependencies = [ + "blake2-rfc", + "byteorder", + "bytes", + "chrono", + "croaring", + "enum_primitive", + "grin_keychain", + "grin_util", + "lazy_static", + "log", + "lru-cache", + "num", + "num-bigint", + "rand 0.6.5", + "serde", + "serde_derive", + "siphasher", + "thiserror", + "zeroize", ] [[package]] name = "grin_core-fuzz" version = "0.0.3" dependencies = [ - "grin_core 3.1.0-beta.3", - "grin_keychain 3.1.0-beta.3", - "libfuzzer-sys 0.1.0 (git+https://github.com/rust-fuzz/libfuzzer-sys.git)", + "grin_core", + "grin_keychain", + "libfuzzer-sys", ] [[package]] name = "grin_keychain" -version = "3.1.0-beta.3" -dependencies = [ - "blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", - "grin_util 3.1.0-beta.3", - "hmac 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "pbkdf2 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", - "ripemd160 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", - "sha2 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "zeroize 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", +version = "5.2.0-alpha.1" +dependencies = [ + "blake2-rfc", + "byteorder", + "digest", + "grin_util", + "hmac", + "lazy_static", + "log", + "pbkdf2", + "rand 0.6.5", + "ripemd160", + "serde", + "serde_derive", + "serde_json", + 
"sha2", + "zeroize", ] [[package]] name = "grin_secp256k1zkp" -version = "0.7.7" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3af3c4c4829b3e2e7ee1d9a542833e4244912fbb887fabe44682558159b068a7" dependencies = [ - "arrayvec 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", - "zeroize 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.25", + "cc", + "libc", + "rand 0.5.6", + "rustc-serialize", + "serde", + "serde_json", + "zeroize", ] [[package]] name = "grin_util" -version = "3.1.0-beta.3" +version = "5.2.0-alpha.2" +dependencies = [ + "backtrace", + "base64", + "byteorder", + "grin_secp256k1zkp", + "lazy_static", + "log", + "log4rs", + "parking_lot", + "rand 0.6.5", + "serde", + "serde_derive", + "walkdir", + "zeroize", + "zip", +] + +[[package]] +name = "hashbrown" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "607c8a29735385251a339424dd462993c0fed8fa09d378f259377df08c126022" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ - "backtrace 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "grin_secp256k1zkp 0.7.7 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "log4rs 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", - "walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "zeroize 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", - "zip 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", ] [[package]] name = "hmac" -version = "0.6.3" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ - "crypto-mac 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", + "crypto-mac", + "digest", ] [[package]] name = "humantime" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" dependencies = [ - "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "quick-error", +] + +[[package]] +name = "humantime" +version = 
"2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "indexmap" +version = "1.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +dependencies = [ + "autocfg 1.1.0", + "hashbrown", ] [[package]] name = "itoa" -version = "0.4.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" [[package]] name = "lazy_static" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "lazycell" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.66" +version = "0.2.126" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" [[package]] name = "libfuzzer-sys" -version = "0.1.0" -source = "git+https://github.com/rust-fuzz/libfuzzer-sys.git#4a413199b5cb1bbed6a1d157b2342b925f8464ac" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "336244aaeab6a12df46480dc585802aa743a72d66b11937844c61bbca84c991d" dependencies = [ - "arbitrary 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)", + "arbitrary", + "cc", + "once_cell", ] [[package]] name = "libloading" -version = "0.5.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" dependencies = [ - "cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 1.0.0", + "winapi", ] [[package]] name = "linked-hash-map" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "linked-hash-map" -version = "0.5.1" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "lock_api" -version = "0.1.5" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" dependencies = [ - "owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard", ] [[package]] name = "log" -version = "0.4.8" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ - "cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 1.0.0", + "serde", ] [[package]] name = "log-mdc" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a94d21414c1f4a51209ad204c1776a3d0765002c76c6abcb602a6f09f1e881c7" [[package]] name = 
"log4rs" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "antidote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "flate2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "log-mdc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", - "serde-value 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_yaml 0.8.8 (registry+https://github.com/rust-lang/crates.io-index)", - "thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "typemap 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4d8e6e1d5f89acca713132acc6034f30bad09b961d1338161bdb71c08f6e4fa" +dependencies = [ + "arc-swap", + "chrono", + "flate2", + "fnv", + "humantime 1.3.0", + "libc", + "log", + "log-mdc", + "parking_lot", + "serde", + "serde-value", + "serde_derive", + "serde_json", + "serde_yaml", + "thread-id", + "typemap", + "winapi", ] [[package]] name = "lru-cache" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" dependencies = [ - "linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "linked-hash-map", ] [[package]] name = "memchr" -version = "2.3.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "miniz-sys" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "miniz_oxide" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "adler32 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "miniz_oxide_c_api" -version = "0.2.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" dependencies = [ - "cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)", - "crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "miniz_oxide 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "adler", ] [[package]] name = "nodrop" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" [[package]] name = "nom" -version = "4.2.3" +version = "5.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" dependencies = [ - "memchr 2.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr", + "version_check", ] [[package]] name = "num" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8536030f9fea7127f841b45bb6243b27255787fb4eb83958aa1ef9d2fdc0c36" dependencies = [ - "num-bigint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "num-complex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", - "num-iter 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", - "num-rational 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits 0.2.15", ] [[package]] name = "num-bigint" -version = "0.2.2" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ - "num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.1.0", + "num-integer", + "num-traits 0.2.15", ] [[package]] name = "num-complex" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" dependencies = [ - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.1.0", + "num-traits 0.2.15", ] [[package]] name = "num-integer" -version = "0.1.39" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.1.0", + "num-traits 0.2.15", ] [[package]] name = "num-iter" -version = "0.1.37" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" dependencies = [ - "num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.1.0", + "num-integer", + "num-traits 0.2.15", ] [[package]] name = "num-rational" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ - "num-bigint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.1.0", + "num-bigint", + "num-integer", + "num-traits 0.2.15", ] [[package]] name = "num-traits" version = "0.1.43" source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" dependencies = [ - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.15", ] [[package]] name = "num-traits" -version = "0.2.6" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg 1.1.0", +] + +[[package]] +name = "object" +version = "0.28.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" +dependencies = [ + "memchr", +] [[package]] name = "odds" version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4eae0151b9dacf24fcc170d9995e511669a082856a91f958a2fe380bfab3fb22" [[package]] -name = "ordered-float" -version = "1.0.2" +name = "once_cell" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" [[package]] -name = "owning_ref" -version = "0.4.0" +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + +[[package]] +name = "ordered-float" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" dependencies = [ - "stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.15", ] [[package]] name = "parking_lot" -version = "0.6.4" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" dependencies = [ - "lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "lock_api", + "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.3.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.10", + "cloudabi", + "libc", + "redox_syscall", + "smallvec", + "winapi", ] [[package]] -name = "pbkdf2" +name = "password-hash" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77e0b28ace46c5a396546bcf443bf422b57049617433d8854227352a4a9b24e7" dependencies = [ - "base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "crypto-mac 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "generic-array 0.9.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "hmac 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", - "sha2 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "base64ct", + "rand_core 0.6.3", + "subtle", ] [[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "podio" -version = "0.1.6" +name = "pbkdf2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" +dependencies = [ + "base64ct", + "crypto-mac", + "hmac", + "password-hash", + "sha2", +] [[package]] -name = "proc-macro2" -version = "0.4.27" +name = "peeking_take_while" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "proc-macro2" -version = "1.0.8" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd96a1e8ed2596c337f8eae5f24924ec83f5ad5ab21ea8e455d3566c69fbcaf7" dependencies = [ - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-ident", ] [[package]] name = "quick-error" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "quote" -version = "0.6.11" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.2" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" dependencies = [ - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", ] [[package]] name = "rand" version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c618c47cd3ebd209790115ab837de41425723956ad3ce2e6a7f09890947cacb9" dependencies = [ - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "cloudabi", + "fuchsia-cprng", + "libc", + "rand_core 0.3.1", + "winapi", ] [[package]] name = "rand" version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" dependencies = [ - "autocfg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_jitter 0.1.4 
(registry+https://github.com/rust-lang/crates.io-index)", - "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 0.1.8", + "libc", + "rand_chacha", + "rand_core 0.4.2", + "rand_hc", + "rand_isaac", + "rand_jitter", + "rand_os", + "rand_pcg", + "rand_xorshift", + "winapi", ] [[package]] name = "rand_chacha" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" dependencies = [ - "autocfg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 0.1.8", + "rand_core 0.3.1", ] [[package]] name = "rand_core" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" dependencies = [ - "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.4.2", ] [[package]] name = "rand_core" -version = "0.4.0" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + +[[package]] +name = "rand_core" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" [[package]] name = "rand_hc" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.3.1", ] [[package]] name = "rand_isaac" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.3.1", ] [[package]] name = "rand_jitter" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "rand_core 0.4.2", + "winapi", ] [[package]] name = "rand_os" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" dependencies = [ - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "cloudabi", + "fuchsia-cprng", + "libc", + "rand_core 0.4.2", + "rdrand", + "winapi", ] [[package]] name = "rand_pcg" 
version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" dependencies = [ - "autocfg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 0.1.8", + "rand_core 0.4.2", ] [[package]] name = "rand_xorshift" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.3.1", ] [[package]] name = "rdrand" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.3.1", ] [[package]] name = "redox_syscall" -version = "0.1.51" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "redox_termios" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "redox_syscall 0.1.51 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "regex" -version = "1.3.4" +version = "1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" dependencies = [ - "aho-corasick 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)", - "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "aho-corasick", + "memchr", + "regex-syntax", ] [[package]] name = "regex-syntax" -version = "0.6.14" +version = "0.6.26" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" [[package]] name = "ripemd160" -version = "0.7.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" dependencies = [ - "block-buffer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", + "block-buffer", + "digest", + "opaque-debug", ] [[package]] name = "rustc-demangle" -version = "0.1.13" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" [[package]] name = "rustc-hash" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-serialize" version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = 
"dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" [[package]] name = "ryu" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "safemem" -version = "0.3.0" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" [[package]] name = "same-file" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ - "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util", ] [[package]] name = "scopeguard" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.89" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1578c6245786b9d168c5447eeacfb96856573ca56c9d68fdcf394be134882a47" dependencies = [ - "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive", ] [[package]] name = "serde-value" -version = "0.5.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a65a7291a8a568adcae4c10a677ebcedbc6c9cec91c054dee2ce40b0e3290eb" dependencies = [ - "ordered-float 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", + "ordered-float", + "serde", ] [[package]] name = "serde_derive" -version = "1.0.89" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "023e9b1467aef8a10fb88f25611870ada9800ef7e22afce356bb0d2387b6f27c" dependencies = [ - "proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.29 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "serde_json" -version = "1.0.39" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7" dependencies = [ - "itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa", + "ryu", + "serde", ] [[package]] name = "serde_yaml" -version = "0.8.8" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" dependencies = [ - "dtoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", - "yaml-rust 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + 
"indexmap", + "ryu", + "serde", + "yaml-rust", ] [[package]] name = "sha2" -version = "0.7.1" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ - "block-buffer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", - "fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "block-buffer", + "cfg-if 1.0.0", + "cpufeatures", + "digest", + "opaque-debug", ] [[package]] name = "shlex" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" [[package]] name = "siphasher" -version = "0.2.3" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" [[package]] name = "smallvec" -version = "0.6.9" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" [[package]] -name = "stable_deref_trait" -version = "1.1.1" +name = "strsim" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] -name = "strsim" -version = "0.7.0" +name = "subtle" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "0.15.29" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" dependencies = [ - "proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "unicode-ident", ] [[package]] name = "synstructure" -version = "0.10.1" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.29 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "syn", + "unicode-xid", ] [[package]] name = "termcolor" -version = "1.0.4" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" dependencies = [ - "wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util", ] [[package]] -name = "termion" -version = "1.5.1" +name = "textwrap" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.51 
(registry+https://github.com/rust-lang/crates.io-index)", - "redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width", ] [[package]] -name = "textwrap" -version = "0.10.0" +name = "thiserror" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" dependencies = [ - "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "thiserror-impl", ] [[package]] -name = "thread-id" -version = "3.3.0" +name = "thiserror-impl" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.51 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "thread_local" -version = "1.0.1" +name = "thread-id" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1" dependencies = [ - "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "redox_syscall", + "winapi", ] [[package]] name = "time" -version = "0.1.42" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.51 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "wasi", + "winapi", ] [[package]] name = "traitobject" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" [[package]] name = "typemap" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "653be63c80a3296da5551e1bfd2cca35227e13cdd08c6668903ae2f4f77aa1f6" dependencies = [ - "unsafe-any 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "unsafe-any", ] [[package]] name = "typenum" -version = "1.10.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] -name = "unicode-width" -version = "0.1.5" +name = "unicode-ident" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" [[package]] -name = "unicode-xid" -version = "0.1.0" +name = "unicode-width" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" [[package]] name = "unicode-xid" -version = "0.2.0" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" [[package]] name = "unsafe-any" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f30360d7979f5e9c6e6cea48af192ea8fab4afb3cf72597154b8f08935bc9c7f" dependencies = [ - "traitobject 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "traitobject", ] [[package]] name = "vec_map" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.1.5" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" -version = "2.2.7" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" dependencies = [ - "same-file 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "same-file", + "winapi", + "winapi-util", ] +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "which" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", ] [[package]] name = "winapi" -version = "0.3.6" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ - "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.2" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "wincolor" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "yaml-rust" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" dependencies = [ - "linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "linked-hash-map", ] [[package]] name = "zeroize" -version = "0.9.3" +version = "1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"20b578acffd8516a6c3f2a1bdefc1ec37e547bb4e0fb8b6b01a4cafc886b4442" dependencies = [ - "zeroize_derive 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", + "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "0.9.3" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" dependencies = [ - "proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.29 (registry+https://github.com/rust-lang/crates.io-index)", - "synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "syn", + "synstructure", ] [[package]] name = "zip" -version = "0.5.3" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93ab48844d61251bb3835145c521d88aa4031d7139e8485990f60ca911fa0815" dependencies = [ - "crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "podio 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[metadata] -"checksum adler32 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7e522997b529f05601e05166c07ed17789691f562762c7f3b987263d2dedee5c" -"checksum aho-corasick 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "743ad5a418686aad3b87fd14c43badd828cf26e214a00f92a384291cf22e1811" -"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -"checksum antidote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "34fde25430d87a9388dadbe6e34d7f72a462c8b43ac8d309b42b0a8505d7e2a5" -"checksum arbitrary 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6c7d1523aa3a127adf8b27af2404c03c12825b4c4d0698f01648d63fa9df62ee" -"checksum arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0d382e583f07208808f6b1249e60848879ba3543f57c32277bf52d69c2f0f0ee" -"checksum arrayvec 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)" = "06f59fe10306bb78facd90d28c2038ad23ffaaefa85bac43c8a434cde383334f" -"checksum arrayvec 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "92c7fb76bc8826a8b33b4ee5bb07a247a81e76764ab4d55e8f73e3a4d8808c71" -"checksum atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9a7d5b8723950951411ee34d271d99dddcc2035a16ab25310ea2c8cfd4369652" -"checksum autocfg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a6d640bee2da49f60a4068a7fae53acde8982514ab7bae8b8cea9e88cbcfd799" -"checksum backtrace 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = "cd5a90e2b463010cd0e0ce9a11d4a9d5d58d9f41d4a6ba3dcaf9e68b466e88b4" -"checksum backtrace-sys 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "797c830ac25ccc92a7f8a7b9862bde440715531514594a6154e3d4a54dd769b6" -"checksum base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" -"checksum bindgen 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f1c85344eb535a31b62f0af37be84441ba9e7f0f4111eb0530f43d15e513fe57" -"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12" -"checksum blake2-rfc 0.2.18 
(registry+https://github.com/rust-lang/crates.io-index)" = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" -"checksum block-buffer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a076c298b9ecdb530ed9d967e74a6027d6a7478924520acddcddc24c1c8ab3ab" -"checksum build_const 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "39092a32794787acd8525ee150305ff051b0aa6cc2abaf193924f5ab05425f39" -"checksum byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "560c32574a12a89ecd91f5e742165893f86e3ab98d21f8ea548658eb9eef5f40" -"checksum byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a019b10a2a7cdeb292db131fc8113e57ea2a908f6e7894b0c3c671893b65dbeb" -"checksum cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)" = "c9ce8bb087aacff865633f0bd5aeaed910fe2fe55b55f4739527f2e023a2e53d" -"checksum cexpr 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "fce5b5fb86b0c57c20c834c1b412fd09c77c8a59b9473f86272709e78874cd1d" -"checksum cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "11d43355396e872eefb45ce6342e4374ed7bc2b3a502d1b28e36d6e23c05d1f4" -"checksum chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "45912881121cb26fad7c38c17ba7daa18764771836b34fab7d3fbd93ed633878" -"checksum clang-sys 0.28.1 (registry+https://github.com/rust-lang/crates.io-index)" = "81de550971c976f176130da4b2978d3b524eaa0fd9ac31f3ceb5ae1231fb4853" -"checksum clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b957d88f4b6a63b9d70d5f454ac8011819c6efa7727858f458ab71c756ce2d3e" -"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -"checksum constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8ff012e225ce166d4422e0e78419d901719760f62ae2b7969ca6b564d1b54a9e" -"checksum crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d663548de7f5cca343f1e0a48d14dcfb0e9eb4e079ec58883b7251539fa10aeb" -"checksum crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" -"checksum croaring 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "52e9057c1caf8e9debd6f938a12ff24028f3c7f85d24f502f46f3c9601905464" -"checksum croaring-sys 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b7d3b66d75dc466ec547604de0517eb4e1a51fd79a83eaff4409f81167dacdc8" -"checksum crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "24ce9782d4d5c53674646a6a4c1863a21a8fc0cb649b3c94dfc16e45071dea19" -"checksum crypto-mac 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7afa06d05a046c7a47c3a849907ec303504608c927f4e85f7bfff22b7180d971" -"checksum digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "03b072242a8cbaf9c145665af9d250c59af3b958f83ed6824e13533cf76d5b90" -"checksum dtoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6d301140eb411af13d3115f9a562c85cc6b541ade9dfa314132244aaee7489dd" -"checksum enum_primitive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be4551092f4d519593039259a9ed8daedf0da12e5109c5280338073eaeb81180" -"checksum env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -"checksum failure 0.1.5 
(registry+https://github.com/rust-lang/crates.io-index)" = "795bd83d3abeb9220f257e597aa0080a508b27533824adf336529648f6abf7e2" -"checksum failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "ea1063915fd7ef4309e222a5a07cf9c319fb9c7836b1f89b85458672dbb127e1" -"checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" -"checksum flate2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2291c165c8e703ee54ef3055ad6188e3d51108e2ded18e9f2476e774fc5ad3d4" -"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" -"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" -"checksum generic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ef25c5683767570c2bbd7deba372926a55eaae9982d7726ee2a1050239d45b9d" -"checksum glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" -"checksum grin_secp256k1zkp 0.7.7 (registry+https://github.com/rust-lang/crates.io-index)" = "23027a7673df2c2b20fb9589d742ff400a10a9c3e4c769a77e9fa3bd19586822" -"checksum hmac 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "733e1b3ac906631ca01ebb577e9bb0f5e37a454032b9036b5eaea4013ed6f99a" -"checksum humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -"checksum itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1306f3464951f30e30d12373d31c79fbd52d236e5e896fd92f96ec7babbbe60b" -"checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14" -"checksum lazycell 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" -"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" -"checksum libfuzzer-sys 0.1.0 (git+https://github.com/rust-fuzz/libfuzzer-sys.git)" = "" -"checksum libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3ad660d7cb8c5822cd83d10897b0f1f1526792737a179e73896152f85b88c2" -"checksum linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7860ec297f7008ff7a1e3382d7f7e1dcd69efc94751a2284bafc3d013c2aa939" -"checksum linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "70fb39025bc7cdd76305867c4eccf2f2dcf6e9a57f5b21a93e1c2d86cd03ec9e" -"checksum lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" -"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" -"checksum log-mdc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a94d21414c1f4a51209ad204c1776a3d0765002c76c6abcb602a6f09f1e881c7" -"checksum log4rs 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "25e0fc8737a634116a2deb38d821e4400ed16ce9dcb0d628a978d399260f5902" -"checksum lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"4d06ff7ff06f729ce5f4e227876cb88d10bc59cd4ae1e09fbb2bde15c850dc21" -"checksum memchr 2.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "53445de381a1f436797497c61d851644d0e8e88e6140f22872ad33a704933978" -"checksum miniz-sys 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "0300eafb20369952951699b68243ab4334f4b10a88f411c221d444b36c40e649" -"checksum miniz_oxide 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c468f2369f07d651a5d0bb2c9079f8488a66d5466efe42d0c5c6466edcb7f71e" -"checksum miniz_oxide_c_api 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b7fe927a42e3807ef71defb191dc87d4e24479b221e67015fe38ae2b7b447bab" -"checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945" -"checksum nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" -"checksum num 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cf4825417e1e1406b3782a8ce92f4d53f26ec055e3622e1881ca8e9f5f9e08db" -"checksum num-bigint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "57450397855d951f1a41305e54851b1a7b8f5d2e349543a02a2effe25459f718" -"checksum num-complex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "107b9be86cd2481930688277b675b0114578227f034674726605b8a482d8baf8" -"checksum num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "e83d528d2677f0518c570baf2b7abdcf0cd2d248860b68507bdcb3e91d4c0cea" -"checksum num-iter 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "af3fdbbc3291a5464dc57b03860ec37ca6bf915ed6ee385e7c6c052c422b2124" -"checksum num-rational 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4e96f040177bb3da242b5b1ecf3f54b5d5af3efbbfb18608977a5d2767b22f10" -"checksum num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)" = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" -"checksum num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1" -"checksum odds 0.2.26 (registry+https://github.com/rust-lang/crates.io-index)" = "4eae0151b9dacf24fcc170d9995e511669a082856a91f958a2fe380bfab3fb22" -"checksum ordered-float 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "18869315e81473c951eb56ad5558bbc56978562d3ecfb87abb7a1e944cea4518" -"checksum owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13" -"checksum parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0802bff09003b291ba756dc7e79313e51cc31667e94afbe847def490424cde5" -"checksum parking_lot_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad7f7e6ebdc79edff6fdcb87a55b620174f7a989e3eb31b65231f4af57f00b8c" -"checksum pbkdf2 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0c09cddfbfc98de7f76931acf44460972edb4023eb14d0c6d4018800e552d8e0" -"checksum peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" -"checksum podio 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "780fb4b6698bbf9cf2444ea5d22411cef2953f0824b98f33cf454ec5615645bd" -"checksum proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)" = 
"4d317f9caece796be1980837fd5cb3dfec5613ebdb04ad0956deea83ce168915" -"checksum proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3acb317c6ff86a4e579dfa00fc5e6cca91ecbb4e7eb2df0468805b674eb88548" -"checksum quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0" -"checksum quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)" = "cdd8e04bd9c52e0342b406469d494fcb033be4bdbe5c606016defbb1681411e1" -"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" -"checksum rand 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c618c47cd3ebd209790115ab837de41425723956ad3ce2e6a7f09890947cacb9" -"checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -"checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -"checksum rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d0e7a549d590831370895ab7ba4ea0c1b6b011d106b5ff2da6eee112615e6dc0" -"checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -"checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -"checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -"checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -"checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -"checksum redox_syscall 0.1.51 (registry+https://github.com/rust-lang/crates.io-index)" = "423e376fffca3dfa06c9e9790a9ccd282fafb3cc6e6397d01dbf64f9bacc6b85" -"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76" -"checksum regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "322cf97724bea3ee221b78fe25ac9c46114ebb51747ad5babd51a2fc6a8235a8" -"checksum regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)" = "b28dfe3fe9badec5dbf0a79a9cccad2cfc2ab5484bdb3e44cbd1ae8b3ba2be06" -"checksum ripemd160 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "482aa56cc68aaeccdaaff1cc5a72c247da8bbad3beb174ca5741f274c22883fb" -"checksum rustc-demangle 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "adacaae16d02b6ec37fdc7acfcddf365978de76d1983d3ee22afc260e1ca9619" -"checksum rustc-hash 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" -"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" -"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -"checksum ryu 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "eb9e9b8cde282a9fe6a42dd4681319bfb63f121b8a8ee9439c6f4107e58a46f7" -"checksum safemem 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8dca453248a96cb0749e36ccdfe2b0b4e54a61bfef89fb97ec621eb8e0a93dd9" -"checksum same-file 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8f20c4be53a8a1ff4c1f1b2bd14570d2f634628709752f0702ecdd2b3f9a5267" -"checksum scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" -"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -"checksum serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)" = "92514fb95f900c9b5126e32d020f5c6d40564c27a5ea6d1d7d9f157a96623560" -"checksum serde-value 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7a663f873dedc4eac1a559d4c6bc0d0b2c34dc5ac4702e105014b8281489e44f" -"checksum serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)" = "bb6eabf4b5914e88e24eea240bb7c9f9a2cbc1bbbe8d961d381975ec3c6b806c" -"checksum serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)" = "5a23aa71d4a4d43fdbfaac00eff68ba8a06a51759a89ac3304323e800c4dd40d" -"checksum serde_yaml 0.8.8 (registry+https://github.com/rust-lang/crates.io-index)" = "0887a8e097a69559b56aa2526bf7aff7c3048cf627dff781f0b56a6001534593" -"checksum sha2 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9eb6be24e4c23a84d7184280d2722f7f2731fcdd4a9d886efbfe4413e4847ea0" -"checksum shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" -"checksum siphasher 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0b8de496cf83d4ed58b6be86c3a275b8602f6ffe98d3024a869e124147a9a3ac" -"checksum smallvec 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)" = "c4488ae950c49d403731982257768f48fada354a5203fe81f9bb6f43ca9002be" -"checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8" -"checksum strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550" -"checksum syn 0.15.29 (registry+https://github.com/rust-lang/crates.io-index)" = "1825685f977249735d510a242a6727b46efe914bb67e38d30c071b1b72b1d5c2" -"checksum synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "73687139bf99285483c96ac0add482c3776528beac1d97d444f6e91f203a2015" -"checksum termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4096add70612622289f2fdcdbd5086dc81c1e2675e6ae58d6c4f62a16c6d7f2f" -"checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096" -"checksum textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "307686869c93e71f94da64286f9a9524c0f308a9e1c87a583de8e9c9039ad3f6" -"checksum thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1" -"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" -"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" -"checksum traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" -"checksum typemap 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "653be63c80a3296da5551e1bfd2cca35227e13cdd08c6668903ae2f4f77aa1f6" -"checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169" -"checksum unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "882386231c45df4700b275c7ff55b6f3698780a650026380e72dabe76fa46526" -"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" -"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" -"checksum unsafe-any 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f30360d7979f5e9c6e6cea48af192ea8fab4afb3cf72597154b8f08935bc9c7f" -"checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" -"checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" -"checksum walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "9d9d7ed3431229a144296213105a390676cc49c9b6a72bd19f3176c98e129fa1" -"checksum which 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5475d47078209a02e60614f7ba5e645ef3ed60f771920ac1906d7c1cc65024c8" -"checksum winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "92c1eb33641e276cfa214a0522acad57be5c56b10cb348b3c5117db75f3ac4b0" -"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7168bab6e1daee33b4557efd0e95d5ca70a03706d39fa5f3fe7a236f584b03c9" -"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -"checksum wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "561ed901ae465d6185fa7864d63fbd5720d0ef718366c9a4dc83cf6170d7e9ba" -"checksum yaml-rust 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "65923dd1784f44da1d2c3dbbc5e822045628c590ba72123e1c73d3c230c4434d" -"checksum zeroize 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "45af6a010d13e4cf5b54c94ba5a2b2eba5596b9e46bf5875612d332a1f2b3f86" -"checksum zeroize_derive 0.9.3 
(registry+https://github.com/rust-lang/crates.io-index)" = "080616bd0e31f36095288bb0acdf1f78ef02c2fa15527d7e993f2a6c7591643e" -"checksum zip 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3c21bb410afa2bd823a047f5bda3adb62f51074ac7e06263b2c97ecdd47e9fc6" + "byteorder", + "crc32fast", + "thiserror", +] diff --git a/core/fuzz/Cargo.toml b/core/fuzz/Cargo.toml index d216319782..158391adfb 100644 --- a/core/fuzz/Cargo.toml +++ b/core/fuzz/Cargo.toml @@ -3,15 +3,12 @@ name = "grin_core-fuzz" version = "0.0.3" authors = ["Grin Developers "] publish = false - -[package.metadata] -cargo-fuzz = true +edition = "2018" [dependencies] +libfuzzer-sys = "0.4.0" grin_core = { path = ".."} grin_keychain = { path = "../../keychain"} -[dependencies.libfuzzer-sys] -git = "https://github.com/rust-fuzz/libfuzzer-sys.git" # Prevent this from interfering with workspaces [workspace] diff --git a/core/fuzz/fuzz_targets/block_read_v1.rs b/core/fuzz/fuzz_targets/block_read_v1.rs index 27cc418b94..5f24ee33de 100644 --- a/core/fuzz/fuzz_targets/block_read_v1.rs +++ b/core/fuzz/fuzz_targets/block_read_v1.rs @@ -1,12 +1,13 @@ #![no_main] +use libfuzzer_sys::fuzz_target; + extern crate grin_core; -#[macro_use] -extern crate libfuzzer_sys; use grin_core::core::UntrustedBlock; -use grin_core::ser; +use grin_core::ser::{self, DeserializationMode}; fuzz_target!(|data: &[u8]| { let mut d = data.clone(); - let _t: Result = ser::deserialize(&mut d, ser::ProtocolVersion(1)); + let _t: Result = + ser::deserialize(&mut d, ser::ProtocolVersion(1), DeserializationMode::Full); }); diff --git a/core/fuzz/fuzz_targets/block_read_v2.rs b/core/fuzz/fuzz_targets/block_read_v2.rs index 40076b32cc..07fe2c4446 100644 --- a/core/fuzz/fuzz_targets/block_read_v2.rs +++ b/core/fuzz/fuzz_targets/block_read_v2.rs @@ -1,12 +1,13 @@ #![no_main] +use libfuzzer_sys::fuzz_target; + extern crate grin_core; -#[macro_use] -extern crate libfuzzer_sys; use grin_core::core::UntrustedBlock; -use grin_core::ser; +use grin_core::ser::{self, DeserializationMode}; fuzz_target!(|data: &[u8]| { let mut d = data.clone(); - let _t: Result = ser::deserialize(&mut d, ser::ProtocolVersion(2)); + let _t: Result = + ser::deserialize(&mut d, ser::ProtocolVersion(2), DeserializationMode::Full); }); diff --git a/core/fuzz/fuzz_targets/compact_block_read_v1.rs b/core/fuzz/fuzz_targets/compact_block_read_v1.rs index 2fca2dd0a0..f421f40705 100644 --- a/core/fuzz/fuzz_targets/compact_block_read_v1.rs +++ b/core/fuzz/fuzz_targets/compact_block_read_v1.rs @@ -1,13 +1,13 @@ #![no_main] +use libfuzzer_sys::fuzz_target; + extern crate grin_core; -#[macro_use] -extern crate libfuzzer_sys; use grin_core::core::UntrustedCompactBlock; -use grin_core::ser; +use grin_core::ser::{self, DeserializationMode}; fuzz_target!(|data: &[u8]| { let mut d = data.clone(); let _t: Result = - ser::deserialize(&mut d, ser::ProtocolVersion(1)); + ser::deserialize(&mut d, ser::ProtocolVersion(1), DeserializationMode::Full); }); diff --git a/core/fuzz/fuzz_targets/compact_block_read_v2.rs b/core/fuzz/fuzz_targets/compact_block_read_v2.rs index 9cac9a577a..a27b766013 100644 --- a/core/fuzz/fuzz_targets/compact_block_read_v2.rs +++ b/core/fuzz/fuzz_targets/compact_block_read_v2.rs @@ -1,13 +1,13 @@ #![no_main] +use libfuzzer_sys::fuzz_target; + extern crate grin_core; -#[macro_use] -extern crate libfuzzer_sys; use grin_core::core::UntrustedCompactBlock; -use grin_core::ser; +use grin_core::ser::{self, DeserializationMode}; fuzz_target!(|data: &[u8]| { let mut d = data.clone(); let _t: Result 
= - ser::deserialize(&mut d, ser::ProtocolVersion(2)); + ser::deserialize(&mut d, ser::ProtocolVersion(2), DeserializationMode::Full); }); diff --git a/core/fuzz/fuzz_targets/transaction_read_v1.rs b/core/fuzz/fuzz_targets/transaction_read_v1.rs index 4c485ba760..2ef5be19e5 100644 --- a/core/fuzz/fuzz_targets/transaction_read_v1.rs +++ b/core/fuzz/fuzz_targets/transaction_read_v1.rs @@ -1,12 +1,13 @@ #![no_main] +use libfuzzer_sys::fuzz_target; + extern crate grin_core; -#[macro_use] -extern crate libfuzzer_sys; use grin_core::core::Transaction; -use grin_core::ser; +use grin_core::ser::{self, DeserializationMode}; fuzz_target!(|data: &[u8]| { let mut d = data.clone(); - let _t: Result<Transaction, ser::Error> = ser::deserialize(&mut d, ser::ProtocolVersion(1)); + let _t: Result<Transaction, ser::Error> = + ser::deserialize(&mut d, ser::ProtocolVersion(1), DeserializationMode::Full); }); diff --git a/core/fuzz/fuzz_targets/transaction_read_v2.rs b/core/fuzz/fuzz_targets/transaction_read_v2.rs index a483c35565..f4c5c7d660 100644 --- a/core/fuzz/fuzz_targets/transaction_read_v2.rs +++ b/core/fuzz/fuzz_targets/transaction_read_v2.rs @@ -1,12 +1,13 @@ #![no_main] +use libfuzzer_sys::fuzz_target; + extern crate grin_core; -#[macro_use] -extern crate libfuzzer_sys; use grin_core::core::Transaction; -use grin_core::ser; +use grin_core::ser::{self, DeserializationMode}; fuzz_target!(|data: &[u8]| { let mut d = data.clone(); - let _t: Result<Transaction, ser::Error> = ser::deserialize(&mut d, ser::ProtocolVersion(2)); + let _t: Result<Transaction, ser::Error> = + ser::deserialize(&mut d, ser::ProtocolVersion(2), DeserializationMode::Full); }); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index daefa6619a..d21da89e4f 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -19,7 +19,7 @@ //! here. use crate::core::block::HeaderVersion; -use crate::core::hash::{Hash, ZERO_HASH}; +use crate::core::hash::Hash; use crate::global; use crate::pow::Difficulty; use std::cmp::{max, min}; @@ -113,13 +113,13 @@ pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32; pub const STATE_SYNC_THRESHOLD: u32 = 2 * DAY_HEIGHT as u32; /// Weight of an input when counted against the max block weight capacity -pub const BLOCK_INPUT_WEIGHT: u64 = 1; +pub const INPUT_WEIGHT: u64 = 1; /// Weight of an output when counted against the max block weight capacity -pub const BLOCK_OUTPUT_WEIGHT: u64 = 21; +pub const OUTPUT_WEIGHT: u64 = 21; /// Weight of a kernel when counted against the max block weight capacity -pub const BLOCK_KERNEL_WEIGHT: u64 = 3; +pub const KERNEL_WEIGHT: u64 = 3; /// Total maximum block weight. At current sizes, this means a maximum /// theoretical size of: @@ -145,6 +145,9 @@ pub const TESTING_SECOND_HARD_FORK: u64 = 6; /// AutomatedTesting and UserTesting HF3 height. pub const TESTING_THIRD_HARD_FORK: u64 = 9; +/// Fork every 3 blocks +pub const TESTING_HARD_FORK_INTERVAL: u64 = 3; + /// Check whether the block version is valid at a given height /// Unlike grin, MWC doesn't want to change the mining algorithms, so the version is constant pub fn header_version(height: u64) -> HeaderVersion { @@ -246,11 +249,13 @@ pub const UNIT_DIFFICULTY: u64 = pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * UNIT_DIFFICULTY; /// Minimal header information required for the Difficulty calculation to -/// take place
Used to iterate through a number of blocks. Note that an instance +/// of this is unable to calculate its own hash, due to an optimization that prevents +/// the header's PoW proof nonces from being deserialized on read #[derive(Clone, Debug, Eq, PartialEq)] -pub struct HeaderInfo { - /// Block hash, ZERO_HASH when this is a sythetic entry. - pub block_hash: Hash, +pub struct HeaderDifficultyInfo { + /// Hash of this block + pub hash: Option, /// Timestamp of the header, 1 when not used (returned info) pub timestamp: u64, /// Network difficulty or next difficulty to use @@ -261,17 +266,17 @@ pub struct HeaderInfo { pub is_secondary: bool, } -impl HeaderInfo { +impl HeaderDifficultyInfo { /// Default constructor pub fn new( - block_hash: Hash, + hash: Option, timestamp: u64, difficulty: Difficulty, secondary_scaling: u32, is_secondary: bool, - ) -> HeaderInfo { - HeaderInfo { - block_hash, + ) -> HeaderDifficultyInfo { + HeaderDifficultyInfo { + hash, timestamp, difficulty, secondary_scaling, @@ -281,9 +286,9 @@ impl HeaderInfo { /// Constructor from a timestamp and difficulty, setting a default secondary /// PoW factor - pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderInfo { - HeaderInfo { - block_hash: ZERO_HASH, + pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderDifficultyInfo { + HeaderDifficultyInfo { + hash: None, timestamp, difficulty, secondary_scaling: global::initial_graph_weight(), @@ -294,9 +299,12 @@ impl HeaderInfo { /// Constructor from a difficulty and secondary factor, setting a default /// timestamp - pub fn from_diff_scaling(difficulty: Difficulty, secondary_scaling: u32) -> HeaderInfo { - HeaderInfo { - block_hash: ZERO_HASH, + pub fn from_diff_scaling( + difficulty: Difficulty, + secondary_scaling: u32, + ) -> HeaderDifficultyInfo { + HeaderDifficultyInfo { + hash: None, timestamp: 1, difficulty, secondary_scaling, @@ -328,9 +336,9 @@ pub fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 { /// /// The secondary proof-of-work factor is calculated along the same lines, as /// an adjustment on the deviation against the ideal value. -pub fn next_difficulty(height: u64, cursor: T) -> HeaderInfo +pub fn next_difficulty(height: u64, cursor: T) -> HeaderDifficultyInfo where - T: IntoIterator, + T: IntoIterator, { // Create vector of difficulty data running from earliest // to latest, and pad with simulated pre-genesis data to allow earlier @@ -361,16 +369,16 @@ where // minimum difficulty avoids getting stuck due to dampening let difficulty = max(MIN_DIFFICULTY, diff_sum * BLOCK_TIME_SEC / adj_ts); - HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling) + HeaderDifficultyInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling) } /// Count, in units of 1/100 (a percent), the number of "secondary" (AR) blocks in the provided window of blocks. 
-pub fn ar_count(_height: u64, diff_data: &[HeaderInfo]) -> u64 { +pub fn ar_count(_height: u64, diff_data: &[HeaderDifficultyInfo]) -> u64 { 100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64 } /// Factor by which the secondary proof of work difficulty will be adjusted -pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 { +pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderDifficultyInfo]) -> u32 { // Get the scaling factor sum of the last DIFFICULTY_ADJUST_WINDOW elements let scale_sum: u64 = diff_data.iter().map(|dd| dd.secondary_scaling as u64).sum(); @@ -1130,4 +1138,77 @@ mod test { // Expected 20M in total. The coin base is exactly 20M assert_eq!(total_blocks_reward, 20000000000000000); } + + // Brute force test to validate that calc_mwc_block_reward and calc_mwc_block_overage are in sync for all blocks + // Please note, the test is slow; it checks values for every block that will be generated until the reward is gone + // The test is marked 'ignore' because it takes about an hour to run + #[test] + #[ignore] + fn test_rewards_full_cycle() { + global::set_local_chain_type(global::ChainTypes::Mainnet); + + let mut total_coins: u64 = GENESIS_BLOCK_REWARD; + let mut height: u64 = 0; + let mut zero_reward_blocks = 0; + + let total_blocks = get_epoch_block_offset(12); + + while zero_reward_blocks < 100 { + assert_eq!(calc_mwc_block_overage(height, true), total_coins); + height += 1; + let r = calc_mwc_block_reward(height); + total_coins += r; + if r == 0 { + zero_reward_blocks += 1; + } + if height % 1000000 == 0 { + println!( + "Current height={}, reward={}, coins={}, progress={:.1}%", + height, + r, + total_coins, + height as f64 / total_blocks as f64 * 100.0 + ); + } + } + + println!( + "Finished with height={}, reward={}, coins={}", + height, + calc_mwc_block_reward(height), + total_coins + ); + + assert_eq!(total_coins, 20000000000000000); + assert!(height > get_epoch_block_offset(12) + 99); + + // Test finished with output: + // Current height=884000000, reward=10000000, coins=19970529927788020, progress=99.7% + // Current height=885000000, reward=10000000, coins=19980529927788020, progress=99.8% + // Current height=886000000, reward=10000000, coins=19990529927788020, progress=99.9% + // Finished with height=886947108, reward=0, coins=20000000000000000 + // test consensus::test::test_rewards_full_cycle ... ok + } + + // Testing the last 1M blocks, starting from the state: height=886000000, reward=10000000, coins=19990529927788020, progress=99.9% + #[test] + fn test_last_epoch() { + global::set_local_chain_type(global::ChainTypes::Mainnet); + + let mut total_coins: u64 = 19990529927788020; + let mut height: u64 = 886000000; + let mut zero_reward_blocks = 0; + + while zero_reward_blocks < 100 { + assert_eq!(calc_mwc_block_overage(height, true), total_coins); + height += 1; + let r = calc_mwc_block_reward(height); + total_coins += r; + if r == 0 { + zero_reward_blocks += 1; + } + } + assert_eq!(total_coins, 20000000000000000); + assert!(height > get_epoch_block_offset(12) + 99); + } } diff --git a/core/src/core.rs b/core/src/core.rs index 366692d82a..8732e6c8bf 100644 --- a/core/src/core.rs +++ b/core/src/core.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
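
As a reviewer aside on the consensus.rs hunks above: `HeaderInfo` becomes `HeaderDifficultyInfo` and the always-present `block_hash` field becomes `hash: Option<Hash>`, so synthetic window entries no longer need a `ZERO_HASH` sentinel. A minimal sketch of how a caller might drive `next_difficulty` after this change; the window contents are invented for illustration, and only the constructors and fields shown in these hunks are assumed:

```rust
use grin_core::consensus::{next_difficulty, HeaderDifficultyInfo};
use grin_core::global::{self, ChainTypes};
use grin_core::pow::Difficulty;

fn main() {
	// from_ts_diff() calls global::initial_graph_weight(), which needs a chain type set.
	global::set_local_chain_type(ChainTypes::AutomatedTesting);

	// A synthetic difficulty window: one block per minute at constant difficulty.
	// Synthetic entries now carry hash: None instead of a ZERO_HASH sentinel.
	let window: Vec<HeaderDifficultyInfo> = (0..60u64)
		.map(|i| HeaderDifficultyInfo::from_ts_diff(i * 60, Difficulty::from_num(1_000)))
		.collect();

	// next_difficulty pads short windows with simulated pre-genesis data,
	// then applies the damped, clamped moving average described in the hunk.
	let next = next_difficulty(61, window);
	assert!(next.difficulty > Difficulty::zero());
}
```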
@@ -23,7 +23,6 @@ pub mod id; pub mod merkle_proof; pub mod pmmr; pub mod transaction; -pub mod verifier_cache; use crate::consensus::GRIN_BASE; use util::secp::pedersen::Commitment; @@ -33,13 +32,14 @@ pub use self::block_sums::*; pub use self::committed::Committed; pub use self::compact_block::*; pub use self::id::ShortId; +pub use self::pmmr::segment::*; pub use self::transaction::*; /// Common errors -#[derive(Fail, Debug)] +#[derive(thiserror::Error, Debug, Clone, Eq, PartialEq)] pub enum Error { /// Human readable representation of amount is invalid - #[fail(display = "Invalid amount string, {}", _0)] + #[error("Invalid amount string, {0}")] InvalidAmountString(String), } diff --git a/core/src/core/block.rs b/core/src/core/block.rs index 6cd2315477..72bc165f47 100644 --- a/core/src/core/block.rs +++ b/core/src/core/block.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,7 +18,6 @@ use crate::consensus::{self, calc_mwc_block_overage, calc_mwc_block_reward, rewa use crate::core::committed::{self, Committed}; use crate::core::compact_block::CompactBlock; use crate::core::hash::{DefaultHashable, Hash, Hashed, ZERO_HASH}; -use crate::core::verifier_cache::VerifierCache; use crate::core::{ pmmr, transaction, Commitment, Inputs, KernelFeatures, Output, Transaction, TransactionBody, TxKernel, Weighting, @@ -28,73 +27,69 @@ use crate::pow::{verify_size, Difficulty, Proof, ProofOfWork}; use crate::ser::{ self, deserialize_default, serialize_default, PMMRable, Readable, Reader, Writeable, Writer, }; -use chrono::naive::{MAX_DATE, MIN_DATE}; -use chrono::prelude::{DateTime, NaiveDateTime, Utc}; +use chrono::prelude::{DateTime, Utc}; use chrono::Duration; -use failure::Fail; use keychain::{self, BlindingFactor}; use std::convert::TryInto; -use std::sync::Arc; use util::from_hex; -use util::RwLock; use util::{secp, static_secp_instance}; /// Errors thrown by Block validation -#[derive(Fail, Debug, Clone, Eq, PartialEq)] +#[derive(thiserror::Error, Debug, Clone, Eq, PartialEq)] pub enum Error { /// The sum of output minus input commitments does not /// match the sum of kernel commitments - #[fail(display = "Block Input/ouput vs kernel sum mismatch")] + #[error("Block Input/output vs kernel sum mismatch")] KernelSumMismatch, /// The total kernel sum on the block header is wrong - #[fail(display = "Block Invalid total kernel sum")] + #[error("Block Invalid total kernel sum")] InvalidTotalKernelSum, /// Same as above but for the coinbase part of a block, including reward - #[fail(display = "Block Invalid total kernel sum plus reward")] + #[error("Block Invalid total kernel sum plus reward")] CoinbaseSumMismatch, /// Restrict block total weight.
- #[fail(display = "Block total weight is too heavy")] + #[error("Block total weight is too heavy")] TooHeavy, /// Block version is invalid for a given block height - #[fail(display = "Block version {:?} is invalid", _0)] + #[error("Block version {0:?} is invalid")] InvalidBlockVersion(HeaderVersion), /// Block time is invalid - #[fail(display = "Block time is invalid")] + #[error("Block time is invalid")] InvalidBlockTime, /// Invalid POW - #[fail(display = "Invalid POW")] + #[error("Invalid POW")] InvalidPow, /// Kernel not valid due to lock_height exceeding block header height - #[fail(display = "Block lock_height {} exceeding header height {}", _0, _1)] + #[error("Block lock_height {0} exceeding header height {1}")] KernelLockHeight(u64, u64), /// NRD kernels are not valid prior to HF3. - #[fail(display = "NRD kernels are not valid prior to HF3")] + #[error("NRD kernels are not valid prior to HF3")] NRDKernelPreHF3, /// NRD kernels are not valid if disabled locally via "feature flag". - #[fail(display = "NRD kernels are not valid, disabled locally via 'feature flag'")] + #[error("NRD kernels are not valid, disabled locally via 'feature flag'")] NRDKernelNotEnabled, /// Underlying tx related error - #[fail(display = "Block Invalid Transaction, {}", _0)] + #[error("Block Invalid Transaction, {0}")] Transaction(transaction::Error), /// Underlying Secp256k1 error (signature validation or invalid public key /// typically) - #[fail(display = "Secp256k1 error, {}", _0)] + #[error("Secp256k1 error, {0}")] Secp(secp::Error), /// Underlying keychain related error - #[fail(display = "keychain error, {}", _0)] + #[error("keychain error, {0}")] Keychain(keychain::Error), /// Error when verifying kernel sums via committed trait. - #[fail(display = "Block Commits error, {}", _0)] + #[error("Block Commits error, {0}")] Committed(committed::Error), /// Validation error relating to cut-through. /// Specifically the tx is spending its own output, which is not valid. - #[fail(display = "Block cut-through error")] + #[error("Block cut-through error")] CutThrough, /// Underlying serialization error. - #[fail(display = "Block serialization error, {}", _0)] + #[error("Block serialization error, {0}")] Serialization(ser::Error), /// Other unspecified error condition - #[fail(display = "Block Generic error, {}", _0)] + #[error("Block Generic error, {0}")] Other(String), } @@ -244,7 +239,7 @@ impl Default for BlockHeader { BlockHeader { version: HeaderVersion(1), height: 0, - timestamp: DateTime::::from_utc(NaiveDateTime::from_timestamp(0, 0), Utc), + timestamp: DateTime::from_timestamp(0, 0).unwrap().to_utc(), prev_hash: ZERO_HASH, prev_root: ZERO_HASH, output_root: ZERO_HASH, @@ -301,8 +296,8 @@ fn read_block_header(reader: &mut R) -> Result MAX_DATE.and_hms(0, 0, 0).timestamp() - || timestamp < MIN_DATE.and_hms(0, 0, 0).timestamp() + if timestamp > chrono::DateTime::::MAX_UTC.timestamp() + || timestamp < chrono::DateTime::::MIN_UTC.timestamp() { return Err(ser::Error::CorruptedData(format!( "Incorrect timestamp {} at block header", @@ -310,10 +305,15 @@ fn read_block_header(reader: &mut R) -> Result::from_utc(NaiveDateTime::from_timestamp(timestamp, 0), Utc), + timestamp: ts.unwrap(), prev_hash, prev_root, output_root, @@ -503,11 +503,8 @@ impl Readable for UntrustedBlockHeader { } // Validate global output and kernel MMR sizes against upper bounds based on block height. 
- let global_weight = TransactionBody::weight_as_block( - 0, - header.output_mmr_count(), - header.kernel_mmr_count(), - ); + let global_weight = + TransactionBody::weight_by_iok(0, header.output_mmr_count(), header.kernel_mmr_count()); if global_weight > global::max_block_weight() * (header.height + 1) { return Err(ser::Error::CorruptedData( "Tx global weight exceeds the limit".to_string(), @@ -688,8 +685,13 @@ impl Block { let version = consensus::header_version(height); let now = Utc::now().timestamp(); - let timestamp = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(now, 0), Utc); + let ts = DateTime::from_timestamp(now, 0); + if ts.is_none() { + return Err(Error::Other("Converting Utc::now() into timestamp".into())); + } + + let timestamp = ts.unwrap(); // Now build the block with all the above information. // Note: We have not validated the block here. // Caller must validate the block as necessary. @@ -736,7 +738,7 @@ impl Block { /// Sum of all fees (inputs less outputs) in the block pub fn total_fees(&self) -> u64 { - self.body.fee() + self.body.fee(self.header.height) } /// "Lightweight" validation that we can perform quickly during read/deserialization. @@ -771,12 +773,8 @@ impl Block { /// Validates all the elements in a block that can be checked without /// additional data. Includes commitment sums and kernels, Merkle /// trees, reward, etc. - pub fn validate( - &self, - prev_kernel_offset: &BlindingFactor, - verifier: Arc<RwLock<dyn VerifierCache>>, - ) -> Result<Commitment, Error> { - self.body.validate(Weighting::AsBlock, verifier)?; + pub fn validate(&self, prev_kernel_offset: &BlindingFactor) -> Result<Commitment, Error> { + self.body.validate(Weighting::AsBlock)?; self.verify_kernel_lock_heights()?; self.verify_nrd_kernels_for_header_version()?; @@ -815,13 +813,10 @@ impl Block { let secp = secp.lock(); let over_commit = secp.commit_value(reward(self.total_fees(), self.header.height))?; - let out_adjust_sum = secp::Secp256k1::commit_sum( - map_vec!(cb_outs, |x| x.commitment()), - vec![over_commit], - )?; + let out_adjust_sum = + secp.commit_sum(map_vec!(cb_outs, |x| x.commitment()), vec![over_commit])?; - let kerns_sum = - secp::Secp256k1::commit_sum(cb_kerns.iter().map(|x| x.excess).collect(), vec![])?; + let kerns_sum = secp.commit_sum(cb_kerns.iter().map(|x| x.excess).collect(), vec![])?; // Verify the kernel sum equals the output sum accounting for block fees. if kerns_sum != out_adjust_sum { diff --git a/core/src/core/block_sums.rs b/core/src/core/block_sums.rs index b6b05c8863..923b244fb8 100644 --- a/core/src/core/block_sums.rs +++ b/core/src/core/block_sums.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/core/src/core/committed.rs b/core/src/core/committed.rs index a0c5f4802e..dc5eac8590 100644 --- a/core/src/core/committed.rs +++ b/core/src/core/committed.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,27 +14,25 @@ //! The Committed trait and associated errors. -use failure::Fail; -use keychain; use keychain::BlindingFactor; use util::secp::key::SecretKey; use util::secp::pedersen::Commitment; use util::{secp, secp_static, static_secp_instance}; /// Errors from summing and verifying kernel excesses via committed trait.
-#[derive(Debug, Clone, PartialEq, Eq, Fail, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error, Serialize, Deserialize)] pub enum Error { /// Keychain related error. - #[fail(display = "Keychain error {}", _0)] + #[error("Keychain error {0}")] Keychain(keychain::Error), /// Secp related error. - #[fail(display = "Secp error {}", _0)] + #[error("Secp error {0}")] Secp(secp::Error), /// Kernel sums do not equal output sums. - #[fail(display = "Kernel sum mismatch")] + #[error("Kernel sum mismatch")] KernelSumMismatch, /// Committed overage (fee or reward) is invalid - #[fail(display = "Invalid value")] + #[error("Invalid value")] InvalidValue, } @@ -73,11 +71,11 @@ pub trait Committed { let secp = secp.lock(); let mut commits = vec![kernel_sum]; if *offset != BlindingFactor::zero() { - let key = offset.secret_key()?; + let key = offset.secret_key(&secp)?; let offset_commit = secp.commit(0, key)?; commits.push(offset_commit); } - secp::Secp256k1::commit_sum(commits, vec![])? + secp.commit_sum(commits, vec![])? }; Ok((kernel_sum, kernel_sum_plus_offset)) @@ -147,7 +145,9 @@ pub fn sum_commits( let zero_commit = secp_static::commit_to_zero_value(); positive.retain(|x| *x != zero_commit); negative.retain(|x| *x != zero_commit); - Ok(secp::Secp256k1::commit_sum(positive, negative)?) + let secp = static_secp_instance(); + let secp = secp.lock(); + Ok(secp.commit_sum(positive, negative)?) } /// Utility function to take sets of positive and negative kernel offsets as @@ -157,20 +157,22 @@ pub fn sum_kernel_offsets( positive: Vec, negative: Vec, ) -> Result { - let positive = to_secrets(positive); - let negative = to_secrets(negative); + let secp = static_secp_instance(); + let secp = secp.lock(); + let positive = to_secrets(positive, &secp); + let negative = to_secrets(negative, &secp); if positive.is_empty() { Ok(BlindingFactor::zero()) } else { - let sum = secp::Secp256k1::blind_sum(positive, negative)?; + let sum = secp.blind_sum(positive, negative)?; Ok(BlindingFactor::from_secret_key(sum)) } } -fn to_secrets(bf: Vec) -> Vec { +fn to_secrets(bf: Vec, secp: &secp::Secp256k1) -> Vec { bf.into_iter() .filter(|x| *x != BlindingFactor::zero()) - .filter_map(|x| x.secret_key().ok()) + .filter_map(|x| x.secret_key(secp).ok()) .collect::>() } diff --git a/core/src/core/compact_block.rs b/core/src/core/compact_block.rs index 87945737f9..233fcfe3e5 100644 --- a/core/src/core/compact_block.rs +++ b/core/src/core/compact_block.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/core/src/core/hash.rs b/core/src/core/hash.rs index a008f4a499..3315f96c73 100644 --- a/core/src/core/hash.rs +++ b/core/src/core/hash.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/core/src/core/id.rs b/core/src/core/id.rs index 7214f2893c..a40c63f6e3 100644 --- a/core/src/core/id.rs +++ b/core/src/core/id.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
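// The committed.rs hunks above stop calling static helpers like
// secp::Secp256k1::commit_sum and instead lock the shared context once and
// thread `&secp` down into to_secrets()/secret_key(). A toy sketch of that
// shape using std types only; `Ctx` and `sum` are hypothetical stand-ins for
// the secp context and its commit_sum/blind_sum methods.
use std::sync::Mutex;

struct Ctx;

impl Ctx {
	fn sum(&self, vals: &[i64]) -> i64 {
		vals.iter().sum()
	}
}

// Stand-in for util::static_secp_instance().
static CTX: Mutex<Ctx> = Mutex::new(Ctx);

fn sum_both(pos: &[i64], neg: &[i64]) -> i64 {
	// Lock once at the boundary, then pass the borrowed context through
	// every helper, instead of each helper re-acquiring the lock.
	let ctx = CTX.lock().unwrap();
	ctx.sum(pos) - ctx.sum(neg)
}

fn main() {
	assert_eq!(sum_both(&[5, 4], &[3]), 6);
}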
diff --git a/core/src/core/merkle_proof.rs b/core/src/core/merkle_proof.rs index bde5f83fdd..901de3ed55 100644 --- a/core/src/core/merkle_proof.rs +++ b/core/src/core/merkle_proof.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -21,10 +21,10 @@ use crate::ser::{PMMRIndexHashable, Readable, Reader, Writeable, Writer}; use util::ToHex; /// Merkle proof errors. -#[derive(Fail, Clone, Debug, PartialEq)] +#[derive(thiserror::Error, Clone, Debug, PartialEq)] pub enum MerkleProofError { /// Merkle proof root hash does not match when attempting to verify. - #[fail(display = "Merkle Proof root mismatch")] + #[error("Merkle Proof root mismatch")] RootMismatch, } @@ -114,13 +114,13 @@ impl MerkleProof { &mut self, root: Hash, element: &dyn PMMRIndexHashable, - node_pos: u64, - peaks_pos: &[u64], + node_pos0: u64, + peaks_pos0: &[u64], ) -> Result<(), MerkleProofError> { - let node_hash = if node_pos > self.mmr_size { + let node_hash = if node_pos0 >= self.mmr_size { element.hash_with_index(self.mmr_size) } else { - element.hash_with_index(node_pos - 1) + element.hash_with_index(node_pos0) }; // handle special case of only a single entry in the MMR @@ -134,25 +134,25 @@ impl MerkleProof { } let sibling = self.path.remove(0); - let (parent_pos, sibling_pos) = pmmr::family(node_pos); + let (parent_pos0, sibling_pos0) = pmmr::family(node_pos0); - if let Ok(x) = peaks_pos.binary_search(&node_pos) { - let parent = if x == peaks_pos.len() - 1 { + if let Ok(x) = peaks_pos0.binary_search(&(node_pos0)) { + let parent = if x == peaks_pos0.len() - 1 { (sibling, node_hash) } else { (node_hash, sibling) }; - self.verify(root, &parent, parent_pos) - } else if parent_pos > self.mmr_size { + self.verify(root, &parent, parent_pos0) + } else if parent_pos0 >= self.mmr_size { let parent = (sibling, node_hash); - self.verify(root, &parent, parent_pos) + self.verify(root, &parent, parent_pos0) } else { - let parent = if pmmr::is_left_sibling(sibling_pos) { + let parent = if pmmr::is_left_sibling(sibling_pos0) { (sibling, node_hash) } else { (node_hash, sibling) }; - self.verify(root, &parent, parent_pos) + self.verify(root, &parent, parent_pos0) } } } diff --git a/core/src/core/pmmr.rs b/core/src/core/pmmr.rs index b41d894679..6c2387714d 100644 --- a/core/src/core/pmmr.rs +++ b/core/src/core/pmmr.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -40,6 +40,7 @@ mod backend; mod pmmr; mod readonly_pmmr; mod rewindable_pmmr; +pub mod segment; mod vec_backend; pub use self::backend::*; diff --git a/core/src/core/pmmr/backend.rs b/core/src/core/pmmr/backend.rs index 5159c42775..0383c27096 100644 --- a/core/src/core/pmmr/backend.rs +++ b/core/src/core/pmmr/backend.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -27,28 +27,40 @@ pub trait Backend { /// associated data element to flatfile storage (for leaf nodes only). The /// position of the first element of the Vec in the MMR is provided to /// help the implementation. 
- fn append(&mut self, data: &T, hashes: Vec) -> Result<(), String>; + fn append(&mut self, data: &T, hashes: &[Hash]) -> Result<(), String>; + + /// Rebuilding a PMMR locally from PIBD segments requires pruned subtree support. + /// This allows us to append an existing pruned subtree directly without the underlying leaf nodes. + fn append_pruned_subtree(&mut self, hash: Hash, pos0: u64) -> Result<(), String>; + + /// Append a single hash to the pmmr + fn append_hash(&mut self, hash: Hash) -> Result<(), String>; /// Rewind the backend state to a previous position, as if all append /// operations after that had been canceled. Expects a position in the PMMR /// to rewind to as well as bitmaps representing the positions added and /// removed since the rewind position. These are what we will "undo" /// during the rewind. - fn rewind(&mut self, position: u64, rewind_rm_pos: &Bitmap) -> Result<(), String>; + fn rewind(&mut self, pos1: u64, rewind_rm_pos: &Bitmap) -> Result<(), String>; /// Get a Hash by insertion position. - fn get_hash(&self, position: u64) -> Option; + fn get_hash(&self, pos0: u64) -> Option; /// Get underlying data by insertion position. - fn get_data(&self, position: u64) -> Option; + fn get_data(&self, pos0: u64) -> Option; /// Get a Hash by original insertion position /// (ignoring the remove log). - fn get_from_file(&self, position: u64) -> Option; + fn get_from_file(&self, pos0: u64) -> Option; + + /// Get hash for peak pos. + /// Optimized for reading peak hashes rather than arbitrary pos hashes. + /// Peaks can be assumed to not be compacted. + fn get_peak_from_file(&self, pos0: u64) -> Option; /// Get a Data Element by original insertion position /// (ignoring the remove log). - fn get_data_from_file(&self, position: u64) -> Option; + fn get_data_from_file(&self, pos0: u64) -> Option; /// Iterator over current (unpruned, unremoved) leaf positions. fn leaf_pos_iter(&self) -> Box + '_>; @@ -56,6 +68,9 @@ pub trait Backend { /// Number of leaves fn n_unpruned_leaves(&self) -> u64; + /// Number of leaves up to the given leaf index + fn n_unpruned_leaves_to_index(&self, to_index: u64) -> u64; + /// Iterator over current (unpruned, unremoved) leaf insertion index. /// Note: This differs from underlying MMR pos - [0, 1, 2, 3, 4] vs. [1, 2, 4, 5, 8]. fn leaf_idx_iter(&self, from_idx: u64) -> Box + '_>; @@ -66,9 +81,15 @@ pub trait Backend { /// triggered removal). fn remove(&mut self, position: u64) -> Result<(), String>; + /// Remove a leaf from the leaf set + fn remove_from_leaf_set(&mut self, pos0: u64); + /// Release underlying datafiles and locks fn release_files(&mut self); + /// Reset prune list, used when PIBD is reset + fn reset_prune_list(&mut self); + /// Saves a snapshot of the rewound utxo file with the block hash as /// filename suffix. We need this when sending a txhashset zip file to a /// node for fast sync. diff --git a/core/src/core/pmmr/pmmr.rs b/core/src/core/pmmr/pmmr.rs index 0a91c8d68a..c4f65902fa 100644 --- a/core/src/core/pmmr/pmmr.rs +++ b/core/src/core/pmmr/pmmr.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,8 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
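// Sketch of the reworked Backend::append signature above: it now borrows
// `&[Hash]` rather than consuming a Vec, so the PMMR can reuse its hash
// buffer across appends. `ToyBackend` and the `Hash` alias are illustrative,
// not Grin's store implementation.
type Hash = [u8; 32];

struct ToyBackend {
	hashes: Vec<Hash>,
	leaves: Vec<u64>,
}

impl ToyBackend {
	fn append(&mut self, data: &u64, hashes: &[Hash]) -> Result<(), String> {
		self.leaves.push(*data);
		self.hashes.extend_from_slice(hashes);
		Ok(())
	}
}

fn main() {
	let mut backend = ToyBackend { hashes: vec![], leaves: vec![] };
	let batch = [[0u8; 32], [1u8; 32]];
	backend.append(&42, &batch).unwrap();
	backend.append(&43, &batch[..1]).unwrap();
	// The caller still owns `batch`; nothing was moved.
	assert_eq!(backend.hashes.len(), 3);
	assert_eq!(batch.len(), 2);
}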
-use std::marker; -use std::u64; +use std::{iter, marker, ops::Range, u64}; use croaring::Bitmap; @@ -23,149 +22,128 @@ use crate::core::pmmr::{Backend, ReadonlyPMMR}; use crate::core::BlockHeader; use crate::ser::{PMMRIndexHashable, PMMRable}; -/// 64 bits all ones: 0b11111111...1 -const ALL_ONES: u64 = u64::MAX; +/// Trait with common methods for reading from a PMMR +pub trait ReadablePMMR { + /// Leaf type + type Item; -/// Prunable Merkle Mountain Range implementation. All positions within the tree -/// start at 1 as they're postorder tree traversal positions rather than array -/// indices. -/// -/// Heavily relies on navigation operations within a binary tree. In particular, -/// all the implementation needs to keep track of the MMR structure is how far -/// we are in the sequence of nodes making up the MMR. -pub struct PMMR<'a, T, B> -where - T: PMMRable, - B: Backend, -{ - /// The last position in the PMMR - pub last_pos: u64, - backend: &'a mut B, - // only needed to parameterise Backend - _marker: marker::PhantomData, -} + /// Get the hash at provided position in the MMR. + /// NOTE all positions are 0-based, so a size n MMR has nodes in positions 0 through n-1 + /// just like a Rust Range 0..n + fn get_hash(&self, pos: u64) -> Option; -impl<'a, T, B> PMMR<'a, T, B> -where - T: PMMRable, - B: 'a + Backend, -{ - /// Build a new prunable Merkle Mountain Range using the provided backend. - pub fn new(backend: &'a mut B) -> PMMR<'_, T, B> { - PMMR { - backend, - last_pos: 0, - _marker: marker::PhantomData, - } - } + /// Get the data element at provided position in the MMR. + fn get_data(&self, pos: u64) -> Option; - /// Build a new prunable Merkle Mountain Range pre-initialized until - /// last_pos with the provided backend. - pub fn at(backend: &'a mut B, last_pos: u64) -> PMMR<'_, T, B> { - PMMR { - backend, - last_pos, - _marker: marker::PhantomData, - } - } + /// Get the hash from the underlying MMR file (ignores the remove log). + fn get_from_file(&self, pos: u64) -> Option; - /// Build a "readonly" view of this PMMR. - pub fn readonly_pmmr(&self) -> ReadonlyPMMR<'_, T, B> { - ReadonlyPMMR::at(&self.backend, self.last_pos) - } + /// Get the hash for the provided peak pos. + /// Optimized for reading peak hashes rather than arbitrary pos hashes. + /// Peaks can be assumed to not be compacted. + fn get_peak_from_file(&self, pos: u64) -> Option; - /// Iterator over current (unpruned, unremoved) leaf positions. - pub fn leaf_pos_iter(&self) -> impl Iterator + '_ { - self.backend.leaf_pos_iter() - } + /// Get the data element at provided position in the MMR (ignores the remove log). + fn get_data_from_file(&self, pos: u64) -> Option; - /// Number of leafs in the MMR - pub fn n_unpruned_leaves(&self) -> u64 { - self.backend.n_unpruned_leaves() - } + /// Total size of the tree, including intermediary nodes and ignoring any pruning. + fn unpruned_size(&self) -> u64; + + /// Iterator over current (unpruned, unremoved) leaf positions. + fn leaf_pos_iter(&self) -> Box + '_>; /// Iterator over current (unpruned, unremoved) leaf insertion indices. - pub fn leaf_idx_iter(&self, from_idx: u64) -> impl Iterator + '_ { - self.backend.leaf_idx_iter(from_idx) - } + fn leaf_idx_iter(&self, from_idx: u64) -> Box + '_>; - /// Returns a vec of the peaks of this MMR. 
- pub fn peaks(&self) -> impl DoubleEndedIterator + '_ { - let peaks_pos = peaks(self.last_pos); - peaks_pos.into_iter().filter_map(move |pi| { - // here we want to get from underlying hash file - // as the pos *may* have been "removed" - self.backend.get_from_file(pi) - }) - } + /// Number of leaves in the MMR + fn n_unpruned_leaves(&self) -> u64; - fn peak_path(&self, peak_pos: u64) -> Vec { - let rhs = self.bag_the_rhs(peak_pos); - let mut res = peaks(self.last_pos) - .into_iter() - .filter(|x| *x < peak_pos) - .filter_map(|x| self.backend.get_from_file(x)) - .collect::>(); - if let Some(rhs) = rhs { - res.push(rhs); - } - res.reverse(); + /// Number of leaves in the MMR up to index + fn n_unpruned_leaves_to_index(&self, to_index: u64) -> u64; - res + /// Is the MMR empty? + fn is_empty(&self) -> bool { + self.unpruned_size() == 0 } /// Takes a single peak position and hashes together /// all the peaks to the right of this peak (if any). /// If this return a hash then this is our peaks sibling. /// If none then the sibling of our peak is the peak to the left. - pub fn bag_the_rhs(&self, peak_pos: u64) -> Option { - let rhs = peaks(self.last_pos) + fn bag_the_rhs(&self, peak_pos0: u64) -> Option { + let size = self.unpruned_size(); + let rhs = peaks(size) .into_iter() - .filter(|x| *x > peak_pos) - .filter_map(|x| self.backend.get_from_file(x)); + .filter(|&x| x > peak_pos0) + .filter_map(|x| self.get_from_file(x)); let mut res = None; for peak in rhs.rev() { res = match res { None => Some(peak), - Some(rhash) => Some((peak, rhash).hash_with_index(self.unpruned_size())), + Some(rhash) => Some((peak, rhash).hash_with_index(size)), } } res } + /// Returns a vec of the peaks of this MMR. + fn peaks(&self) -> Vec { + peaks(self.unpruned_size()) + .into_iter() + .filter_map(move |pi0| self.get_peak_from_file(pi0)) + .collect() + } + + /// Hashes of the peaks excluding `peak_pos`, where the rhs is bagged together + fn peak_path(&self, peak_pos0: u64) -> Vec { + let rhs = self.bag_the_rhs(peak_pos0); + let mut res = peaks(self.unpruned_size()) + .into_iter() + .filter(|&x| x < peak_pos0) + .filter_map(|x| self.get_peak_from_file(x)) + .collect::>(); + if let Some(rhs) = rhs { + res.push(rhs); + } + res.reverse(); + + res + } + /// Computes the root of the MMR. Find all the peaks in the current /// tree and "bags" them to get a single peak. - pub fn root(&self) -> Result { + fn root(&self) -> Result { if self.is_empty() { return Ok(ZERO_HASH); } let mut res = None; - for peak in self.peaks().rev() { + let peaks = self.peaks(); + let mmr_size = self.unpruned_size(); + for peak in peaks.into_iter().rev() { res = match res { None => Some(peak), - Some(rhash) => Some((peak, rhash).hash_with_index(self.unpruned_size())), + Some(rhash) => Some((peak, rhash).hash_with_index(mmr_size)), } } res.ok_or_else(|| "no root, invalid tree".to_owned()) } /// Build a Merkle proof for the element at the given position. 
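// The default root() above folds the peaks right-to-left: start from the
// rightmost peak and repeatedly hash (peak, acc) at index mmr_size. A
// runnable sketch of that fold, with std's DefaultHasher standing in for
// Grin's hash_with_index (the shape of the fold is the point, not the hash):
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn hash_with_index(index: u64, pair: (u64, u64)) -> u64 {
	let mut h = DefaultHasher::new();
	(index, pair).hash(&mut h);
	h.finish()
}

fn bag_peaks(peaks: &[u64], mmr_size: u64) -> Option<u64> {
	let mut res = None;
	for &peak in peaks.iter().rev() {
		res = match res {
			None => Some(peak),
			Some(rhash) => Some(hash_with_index(mmr_size, (peak, rhash))),
		};
	}
	res
}

fn main() {
	// An empty MMR has no root; a single peak is its own root; several
	// peaks collapse into one bagged value.
	assert_eq!(bag_peaks(&[], 0), None);
	assert_eq!(bag_peaks(&[6], 7), Some(6));
	assert!(bag_peaks(&[6, 9], 10).is_some());
}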
- pub fn merkle_proof(&self, pos: u64) -> Result { - debug!("merkle_proof {}, last_pos {}", pos, self.last_pos); + fn merkle_proof(&self, pos0: u64) -> Result { + let size = self.unpruned_size(); + debug!("merkle_proof {}, size {}", pos0, size); // check this pos is actually a leaf in the MMR - if !is_leaf(pos) { - return Err(format!("not a mmr leaf at pos {}", pos)); + if !is_leaf(pos0) { + return Err(format!("not a mmr leaf at pos {}", pos0)); } // check we actually have a hash in the MMR at this pos - self.get_hash(pos) - .ok_or_else(|| format!("no element at pos {}", pos))?; + self.get_hash(pos0) + .ok_or_else(|| format!("no element at pos {}", pos0))?; - let mmr_size = self.unpruned_size(); - - let family_branch = family_branch(pos, self.last_pos); + let family_branch = family_branch(pos0, size); let mut path = family_branch .iter() @@ -174,26 +152,77 @@ where let peak_pos = match family_branch.last() { Some(&(x, _)) => x, - None => pos, + None => pos0, }; path.append(&mut self.peak_path(peak_pos)); - Ok(MerkleProof { mmr_size, path }) + Ok(MerkleProof { + mmr_size: size, + path, + }) + } +} + +/// Prunable Merkle Mountain Range implementation. All positions within the tree +/// start at 0 just like array indices. +/// +/// Heavily relies on navigation operations within a binary tree. In particular, +/// all the implementation needs to keep track of the MMR structure is how far +/// we are in the sequence of nodes making up the MMR. +pub struct PMMR<'a, T, B> +where + T: PMMRable, + B: Backend, +{ + /// Number of nodes in the PMMR + pub size: u64, + backend: &'a mut B, + // only needed to parameterise Backend + _marker: marker::PhantomData, +} + +impl<'a, T, B> PMMR<'a, T, B> +where + T: PMMRable, + B: 'a + Backend, +{ + /// Build a new prunable Merkle Mountain Range using the provided backend. + pub fn new(backend: &'a mut B) -> PMMR<'_, T, B> { + PMMR { + backend, + size: 0, + _marker: marker::PhantomData, + } + } + + /// Build a new prunable Merkle Mountain Range pre-initialized until + /// size with the provided backend. + pub fn at(backend: &'a mut B, size: u64) -> PMMR<'_, T, B> { + PMMR { + backend, + size, + _marker: marker::PhantomData, + } + } + + /// Build a "readonly" view of this PMMR. + pub fn readonly_pmmr(&self) -> ReadonlyPMMR<'_, T, B> { + ReadonlyPMMR::at(&self.backend, self.size) } /// Push a new element into the MMR. Computes new related peaks at /// the same time if applicable. 
- pub fn push(&mut self, elmt: &T) -> Result { - let elmt_pos = self.last_pos + 1; - let mut current_hash = elmt.hash_with_index(elmt_pos - 1); + pub fn push(&mut self, leaf: &T) -> Result { + let leaf_pos = self.size; + let mut current_hash = leaf.hash_with_index(leaf_pos); let mut hashes = vec![current_hash]; - let mut pos = elmt_pos; + let mut pos = leaf_pos; - let (peak_map, height) = peak_map_height(pos - 1); + let (peak_map, height) = peak_map_height(pos); if height != 0 { - return Err(format!("bad mmr size {}", pos - 1)); + return Err(format!("bad mmr size {}", pos)); } // hash with all immediately preceding peaks, as indicated by peak map let mut peak = 1; @@ -201,18 +230,62 @@ where let left_sibling = pos + 1 - 2 * peak; let left_hash = self .backend - .get_from_file(left_sibling) + .get_peak_from_file(left_sibling) .ok_or("missing left sibling in tree, should not have been pruned")?; peak *= 2; pos += 1; - current_hash = (left_hash, current_hash).hash_with_index(pos - 1); + current_hash = (left_hash, current_hash).hash_with_index(pos); hashes.push(current_hash); } // append all the new nodes and update the MMR index - self.backend.append(elmt, hashes)?; - self.last_pos = pos; - Ok(elmt_pos) + self.backend.append(leaf, &hashes)?; + self.size = pos + 1; + Ok(leaf_pos) + } + + /// Push a pruned subtree into the PMMR + pub fn push_pruned_subtree(&mut self, hash: Hash, pos0: u64) -> Result<(), String> { + // First append the subtree + self.backend.append_pruned_subtree(hash, pos0)?; + self.size = pos0 + 1; + + let mut pos = pos0; + let mut current_hash = hash; + + let (peak_map, _) = peak_map_height(pos); + + // Then hash with all immediately preceding peaks, as indicated by peak map + let mut peak = 1; + while (peak_map & peak) != 0 { + let (parent, sibling) = family(pos); + peak *= 2; + if sibling > pos { + // is right sibling, we should be done + continue; + } + let left_hash = self + .backend + .get_hash(sibling) + .ok_or("missing left sibling in tree, should not have been pruned")?; + pos = parent; + current_hash = (left_hash, current_hash).hash_with_index(parent); + self.backend.append_hash(current_hash)?; + } + + // Round size up to next leaf, ready for insertion + self.size = crate::core::pmmr::round_up_to_leaf_pos(pos); + Ok(()) + } + + /// Reset prune list + pub fn reset_prune_list(&mut self) { + self.backend.reset_prune_list(); + } + + /// Remove the specified position from the leaf set + pub fn remove_from_leaf_set(&mut self, pos0: u64) { + self.backend.remove_from_leaf_set(pos0); } /// Saves a snapshot of the MMR tagged with the block hash. @@ -231,13 +304,9 @@ where // Identify which actual position we should rewind to as the provided // position is a leaf. We traverse the MMR to include any parent(s) that // need to be included for the MMR to be valid. - let mut pos = position; - while bintree_postorder_height(pos + 1) > 0 { - pos += 1; - } - - self.backend.rewind(pos, rewind_rm_pos)?; - self.last_pos = pos; + let leaf_pos = round_up_to_leaf_pos(position); + self.backend.rewind(leaf_pos, rewind_rm_pos)?; + self.size = leaf_pos; Ok(()) } @@ -245,63 +314,23 @@ where /// Returns an error if prune is called on a non-leaf position. /// Returns false if the leaf node has already been pruned. /// Returns true if pruning is successful. 
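// A self-contained toy mirroring the reworked push() above: 0-based
// positions, the new leaf lands at position `size`, and peak_map_height()
// says which left siblings to merge with on the way up. XOR of
// (left, right, pos) stands in for hash_with_index so the sketch stays
// runnable; ToyMmr is illustrative, not Grin's PMMR.
const ALL_ONES: u64 = u64::MAX;

fn peak_map_height(mut size: u64) -> (u64, u64) {
	if size == 0 {
		return (0, 0);
	}
	let mut peak_size = ALL_ONES >> size.leading_zeros();
	let mut peak_map = 0;
	while peak_size != 0 {
		peak_map <<= 1;
		if size >= peak_size {
			size -= peak_size;
			peak_map |= 1;
		}
		peak_size >>= 1;
	}
	(peak_map, size)
}

struct ToyMmr {
	nodes: Vec<u64>, // one "hash" per 0-based position
}

impl ToyMmr {
	fn push(&mut self, leaf: u64) -> u64 {
		let leaf_pos = self.nodes.len() as u64;
		let (peak_map, height) = peak_map_height(leaf_pos);
		assert_eq!(height, 0, "bad mmr size {}", leaf_pos);
		let mut pos = leaf_pos;
		let mut current = leaf;
		self.nodes.push(current);
		// Merge with every immediately preceding peak, as the peak map dictates.
		let mut peak = 1;
		while (peak_map & peak) != 0 {
			let left_sibling = pos + 1 - 2 * peak;
			let left = self.nodes[left_sibling as usize];
			peak *= 2;
			pos += 1;
			current = left ^ current ^ pos; // stand-in for hash_with_index
			self.nodes.push(current);
		}
		leaf_pos
	}
}

fn main() {
	let mut mmr = ToyMmr { nodes: vec![] };
	let positions: Vec<u64> = (0u64..4).map(|i| mmr.push(i)).collect();
	// Leaves land at 0, 1, 3, 4; parents fill 2, 5 and the root 6.
	assert_eq!(positions, vec![0, 1, 3, 4]);
	assert_eq!(mmr.nodes.len(), 7);
}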
- pub fn prune(&mut self, position: u64) -> Result { - if !is_leaf(position) { - return Err(format!( - "pmmr node at {} is not a leaf, can't prune.", - position - )); + pub fn prune(&mut self, pos0: u64) -> Result { + if !is_leaf(pos0) { + return Err(format!("Node at {} is not a leaf, can't prune.", pos0)); } - if self.backend.get_hash(position).is_none() { + if self.backend.get_hash(pos0).is_none() { return Ok(false); } - self.backend.remove(position)?; + self.backend.remove(pos0)?; Ok(true) } - /// Get the hash at provided position in the MMR. - pub fn get_hash(&self, pos: u64) -> Option { - if pos > self.last_pos { - None - } else if is_leaf(pos) { - // If we are a leaf then get hash from the backend. - self.backend.get_hash(pos) - } else { - // If we are not a leaf get hash ignoring the remove log. - self.backend.get_from_file(pos) - } - } - - /// Get the data element at provided position in the MMR. - pub fn get_data(&self, pos: u64) -> Option { - if pos > self.last_pos { - // If we are beyond the rhs of the MMR return None. - None - } else if is_leaf(pos) { - // If we are a leaf then get data from the backend. - self.backend.get_data(pos) - } else { - // If we are not a leaf then return None as only leaves have data. - None - } - } - - /// Get the hash from the underlying MMR file - /// (ignores the remove log). - fn get_from_file(&self, pos: u64) -> Option { - if pos > self.last_pos { - None - } else { - self.backend.get_from_file(pos) - } - } - /// Walks all unpruned nodes in the MMR and revalidate all parent hashes pub fn validate(&self) -> Result<(), String> { // iterate on all parent nodes - for n in 1..(self.last_pos + 1) { + for n in 0..self.size { let height = bintree_postorder_height(n); if height > 0 { if let Some(hash) = self.get_hash(n) { @@ -311,11 +340,11 @@ where if let Some(left_child_hs) = self.get_from_file(left_pos) { if let Some(right_child_hs) = self.get_from_file(right_pos) { // hash the two child nodes together with parent_pos and compare - if (left_child_hs, right_child_hs).hash_with_index(n - 1) != hash { + if (left_child_hs, right_child_hs).hash_with_index(n) != hash { return Err(format!( "Invalid MMR, hash of parent at {} does \ not match children.", - n + n + 1 )); } } @@ -326,17 +355,6 @@ where Ok(()) } - /// Is the MMR empty? - pub fn is_empty(&self) -> bool { - self.last_pos == 0 - } - - /// Total size of the tree, including intermediary nodes and ignoring any - /// pruning. - pub fn unpruned_size(&self) -> u64 { - self.last_pos - } - /// Debugging utility to print information about the MMRs. Short version /// only prints the last 8 nodes. 
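// validate() above re-derives every parent from its two children. In 0-based
// postorder, the children of a parent at position n with height h sit at
// n - (1 << h) (left) and n - 1 (right); that is the position arithmetic the
// excerpt elides between its hunks. A quick check against the 7-node MMR,
// whose parents are 2, 5 and the root 6:
fn children(n: u64, height: u64) -> (u64, u64) {
	(n - (1 << height), n - 1)
}

fn main() {
	assert_eq!(children(2, 1), (0, 1));
	assert_eq!(children(5, 1), (3, 4));
	assert_eq!(children(6, 2), (2, 5));
}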
pub fn dump(&self, short: bool) { @@ -344,7 +362,7 @@ where if sz > 2000 && !short { return; } - let start = if short && sz > 7 { sz / 8 - 1 } else { 0 }; + let start = if short { sz / 8 } else { 0 }; for n in start..(sz / 8 + 1) { let mut idx = "".to_owned(); let mut hashes = "".to_owned(); @@ -352,15 +370,15 @@ where if m >= sz { break; } - idx.push_str(&format!("{:>8} ", m + 1)); - let ohs = self.get_hash(m + 1); + idx.push_str(&format!("{:>8} ", m)); + let ohs = self.get_hash(m); match ohs { Some(hs) => hashes.push_str(&format!("{} ", hs)), None => hashes.push_str(&format!("{:>8} ", "??")), } } - trace!("{}", idx); - trace!("{}", hashes); + debug!("{}", idx); + debug!("{}", hashes); } } @@ -378,7 +396,7 @@ where if sz > 2000 && !short { return; } - let start = if short && sz > 7 { sz / 8 - 1 } else { 0 }; + let start = if short { sz / 8 } else { 0 }; for n in start..(sz / 8 + 1) { let mut idx = "".to_owned(); let mut hashes = "".to_owned(); @@ -387,7 +405,7 @@ where break; } idx.push_str(&format!("{:>8} ", m + 1)); - let ohs = self.get_from_file(m + 1); + let ohs = self.get_from_file(m); match ohs { Some(hs) => hashes.push_str(&format!("{} ", hs)), None => hashes.push_str(&format!("{:>8} ", " .")), @@ -399,198 +417,239 @@ where } } -/// Gets the postorder traversal index of all peaks in a MMR given its size. -/// Starts with the top peak, which is always on the left -/// side of the range, and navigates toward lower siblings toward the right -/// of the range. -pub fn peaks(num: u64) -> Vec { - if num == 0 { - return vec![]; - } - let mut peak_size = ALL_ONES >> num.leading_zeros(); - let mut num_left = num; - let mut sum_prev_peaks = 0; - let mut peaks = vec![]; - while peak_size != 0 { - if num_left >= peak_size { - peaks.push(sum_prev_peaks + peak_size); - sum_prev_peaks += peak_size; - num_left -= peak_size; +impl<'a, T, B> ReadablePMMR for PMMR<'a, T, B> +where + T: PMMRable, + B: 'a + Backend, +{ + type Item = T::E; + + fn get_hash(&self, pos0: u64) -> Option { + if pos0 >= self.size { + None + } else if is_leaf(pos0) { + // If we are a leaf then get hash from the backend. + self.backend.get_hash(pos0) + } else { + // If we are not a leaf get hash ignoring the remove log. + self.backend.get_from_file(pos0) + } + } + + fn get_data(&self, pos0: u64) -> Option { + if pos0 >= self.size { + // If we are beyond the rhs of the MMR return None. + None + } else if is_leaf(pos0) { + // If we are a leaf then get data from the backend. + self.backend.get_data(pos0) + } else { + // If we are not a leaf then return None as only leaves have data. + None } - peak_size >>= 1; } - if num_left > 0 { - return vec![]; + + fn get_from_file(&self, pos0: u64) -> Option { + if pos0 >= self.size { + None + } else { + self.backend.get_from_file(pos0) + } } - peaks -} -/// The number of leaves in a MMR of the provided size. 
-pub fn n_leaves(size: u64) -> u64 { - let (sizes, height) = peak_sizes_height(size); - let nleaves = sizes.into_iter().map(|n| (n + 1) / 2 as u64).sum(); - if height == 0 { - nleaves - } else { - nleaves + 1 + fn get_peak_from_file(&self, pos0: u64) -> Option { + if pos0 >= self.size { + None + } else { + self.backend.get_peak_from_file(pos0) + } + } + + fn get_data_from_file(&self, pos0: u64) -> Option { + if pos0 >= self.size { + None + } else { + self.backend.get_data_from_file(pos0) + } + } + + fn unpruned_size(&self) -> u64 { + self.size + } + + fn leaf_pos_iter(&self) -> Box + '_> { + self.backend.leaf_pos_iter() + } + + fn leaf_idx_iter(&self, from_idx: u64) -> Box + '_> { + self.backend.leaf_idx_iter(from_idx) + } + + fn n_unpruned_leaves(&self) -> u64 { + self.backend.n_unpruned_leaves() } -} -/// Returns the pmmr index of the nth inserted element -pub fn insertion_to_pmmr_index(mut sz: u64) -> u64 { - if sz == 0 { - return 0; + fn n_unpruned_leaves_to_index(&self, to_index: u64) -> u64 { + self.backend.n_unpruned_leaves_to_index(to_index) } - // 1 based pmmrs - sz -= 1; - 2 * sz - sz.count_ones() as u64 + 1 } -/// sizes of peaks and height of next node in mmr of given size -/// Example: on input 5 returns ([3,1], 1) as mmr state before adding 5 was +/// 64 bits all ones: 0b11111111...1 +const ALL_ONES: u64 = u64::MAX; + +/// peak bitmap and height of next node in mmr of given size +/// Example: on size 4 returns (0b11, 0) as mmr tree of size 4 is /// 2 /// / \ -/// 0 1 3 4 -pub fn peak_sizes_height(size: u64) -> (Vec, u64) { +/// 0 1 3 +/// with 0b11 indicating the presence of peaks of height 0 and 1, +/// and 0 the height of the next node 4, which is a leaf +/// NOTE: +/// the peak map also encodes the path taken from the root to the added node +/// since the path turns left (resp. right) if-and-only-if +/// a peak at that height is absent (resp. present) +pub fn peak_map_height(mut size: u64) -> (u64, u64) { if size == 0 { - return (vec![], 0); + // rust can't shift right by 64 + return (0, 0); } let mut peak_size = ALL_ONES >> size.leading_zeros(); - let mut sizes = vec![]; - let mut size_left = size; + let mut peak_map = 0; while peak_size != 0 { - if size_left >= peak_size { - sizes.push(peak_size); - size_left -= peak_size; + peak_map <<= 1; + if size >= peak_size { + size -= peak_size; + peak_map |= 1; } peak_size >>= 1; } - (sizes, size_left) + (peak_map, size) } -/// return (peak_map, pos_height) of given 0-based node pos prior to its -/// addition -/// Example: on input 4 returns (0b11, 0) as mmr state before adding 4 was +/// sizes of peaks and height of next node in mmr of given size +/// similar to peak_map_height but replacing bitmap by vector of sizes +/// Example: on input 5 returns ([3,1], 1) as mmr state before adding 5 was /// 2 /// / \ -/// 0 1 3 -/// with 0b11 indicating presence of peaks of height 0 and 1. -/// NOTE: -/// the peak map also encodes the path taken from the root to the added node -/// since the path turns left (resp. right) if-and-only-if -/// a peak at that height is absent (resp. 
present) -pub fn peak_map_height(mut pos: u64) -> (u64, u64) { - if pos == 0 { - return (0, 0); +/// 0 1 3 4 +pub fn peak_sizes_height(mut size: u64) -> (Vec, u64) { + if size == 0 { + // rust can't shift right by 64 + return (vec![], 0); } - let mut peak_size = ALL_ONES >> pos.leading_zeros(); - let mut bitmap = 0; + let mut peak_size = ALL_ONES >> size.leading_zeros(); + let mut peak_sizes = vec![]; while peak_size != 0 { - bitmap <<= 1; - if pos >= peak_size { - pos -= peak_size; - bitmap |= 1; + if size >= peak_size { + peak_sizes.push(peak_size); + size -= peak_size; } peak_size >>= 1; } - (bitmap, pos) + (peak_sizes, size) } -/// The height of a node in a full binary tree from its postorder traversal -/// index. This function is the base on which all others, as well as the MMR, -/// are built. -pub fn bintree_postorder_height(num: u64) -> u64 { - if num == 0 { - return 0; +/// Gets the postorder traversal 0-based index of all peaks in a MMR given its size. +/// Starts with the top peak, which is always on the left +/// side of the range, and navigates toward lower siblings toward the right +/// of the range. +/// For some odd reason, return empty when next node is not a leaf +pub fn peaks(size: u64) -> Vec { + let (peak_sizes, height) = peak_sizes_height(size); + if height == 0 { + peak_sizes + .iter() + .scan(0, |acc, &x| { + *acc += &x; + Some(*acc) + }) + .map(|x| x - 1) // rust doesn't allow starting scan with -1 as u64 + .collect() + } else { + vec![] + } +} +/// The number of leaves in a MMR of the provided size. +pub fn n_leaves(size: u64) -> u64 { + let (peak_map, height) = peak_map_height(size); + if height == 0 { + peak_map + } else { + peak_map + 1 } - peak_map_height(num - 1).1 +} + +/// returns least position >= pos0 with height 0 +pub fn round_up_to_leaf_pos(pos0: u64) -> u64 { + let (insert_idx, height) = peak_map_height(pos0); + let leaf_idx = if height == 0 { + insert_idx + } else { + insert_idx + 1 + }; + return insertion_to_pmmr_index(leaf_idx); +} + +/// Returns the 0-based pmmr index of 0-based leaf index n +pub fn insertion_to_pmmr_index(nleaf0: u64) -> u64 { + 2 * nleaf0 - nleaf0.count_ones() as u64 +} + +/// Returns the insertion index of the given leaf index +pub fn pmmr_leaf_to_insertion_index(pos0: u64) -> Option { + let (insert_idx, height) = peak_map_height(pos0); + if height == 0 { + Some(insert_idx) + } else { + None + } +} + +/// The height of a node in a full binary tree from its postorder traversal +/// index. +pub fn bintree_postorder_height(pos0: u64) -> u64 { + peak_map_height(pos0).1 } /// Is this position a leaf in the MMR? /// We know the positions of all leaves based on the postorder height of an MMR /// of any size (somewhat unintuitively but this is how the PMMR is "append /// only"). -pub fn is_leaf(pos: u64) -> bool { - bintree_postorder_height(pos) == 0 +pub fn is_leaf(pos0: u64) -> bool { + bintree_postorder_height(pos0) == 0 } /// Calculates the positions of the parent and sibling of the node at the /// provided position. -pub fn family(pos: u64) -> (u64, u64) { - let (peak_map, height) = peak_map_height(pos - 1); +pub fn family(pos0: u64) -> (u64, u64) { + let (peak_map, height) = peak_map_height(pos0); let peak = 1 << height; if (peak_map & peak) != 0 { - (pos + 1, pos + 1 - 2 * peak) + (pos0 + 1, pos0 + 1 - 2 * peak) } else { - (pos + 2 * peak, pos + 2 * peak - 1) + (pos0 + 2 * peak, pos0 + 2 * peak - 1) } } /// Is the node at this pos the "left" sibling of its parent? 
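// These helpers are pure functions of the MMR size, so they can be sanity
// checked in isolation. The bodies below are copied verbatim from the new
// 0-based versions above; the asserts use the canonical small MMRs
// (size 7 = one full tree rooted at 6; size 10 = peaks at 6 and 9).
const ALL_ONES: u64 = u64::MAX;

fn peak_map_height(mut size: u64) -> (u64, u64) {
	if size == 0 {
		return (0, 0);
	}
	let mut peak_size = ALL_ONES >> size.leading_zeros();
	let mut peak_map = 0;
	while peak_size != 0 {
		peak_map <<= 1;
		if size >= peak_size {
			size -= peak_size;
			peak_map |= 1;
		}
		peak_size >>= 1;
	}
	(peak_map, size)
}

fn n_leaves(size: u64) -> u64 {
	let (peak_map, height) = peak_map_height(size);
	if height == 0 {
		peak_map
	} else {
		peak_map + 1
	}
}

fn insertion_to_pmmr_index(nleaf0: u64) -> u64 {
	2 * nleaf0 - nleaf0.count_ones() as u64
}

fn main() {
	// A valid MMR size is one where the next node would be a leaf (height 0).
	assert_eq!(peak_map_height(7), (0b100, 0)); // single peak of height 2
	assert_eq!(peak_map_height(10), (0b110, 0)); // peaks of heights 2 and 1
	assert_eq!(peak_map_height(5).1, 1); // size 5 is mid-merge, not valid
	// Read as binary, the peak map is also the leaf count: 4 + 2 leaves.
	assert_eq!(n_leaves(10), 6);
	// Those six leaves sit at positions 0, 1, 3, 4, 7, 8.
	let leaf_positions: Vec<u64> = (0u64..6).map(insertion_to_pmmr_index).collect();
	assert_eq!(leaf_positions, vec![0, 1, 3, 4, 7, 8]);
}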
-pub fn is_left_sibling(pos: u64) -> bool { - let (peak_map, height) = peak_map_height(pos - 1); +pub fn is_left_sibling(pos0: u64) -> bool { + let (peak_map, height) = peak_map_height(pos0); let peak = 1 << height; (peak_map & peak) == 0 } -/// Returns the path from the specified position up to its -/// corresponding peak in the MMR. -/// The size (and therefore the set of peaks) of the MMR -/// is defined by last_pos. -pub fn path(pos: u64, last_pos: u64) -> impl Iterator { - Path::new(pos, last_pos) -} - -struct Path { - current: u64, - last_pos: u64, - peak: u64, - peak_map: u64, -} - -impl Path { - fn new(pos: u64, last_pos: u64) -> Self { - let (peak_map, height) = peak_map_height(pos - 1); - Path { - current: pos, - peak: 1 << height, - peak_map, - last_pos, - } - } -} - -impl Iterator for Path { - type Item = u64; - - fn next(&mut self) -> Option { - if self.current > self.last_pos { - return None; - } - - let next = Some(self.current); - self.current += if (self.peak_map & self.peak) != 0 { - 1 - } else { - 2 * self.peak - }; - self.peak <<= 1; - next - } -} - /// For a given starting position calculate the parent and sibling positions /// for the branch/path from that position to the peak of the tree. /// We will use the sibling positions to generate the "path" of a Merkle proof. -pub fn family_branch(pos: u64, last_pos: u64) -> Vec<(u64, u64)> { +pub fn family_branch(pos0: u64, size: u64) -> Vec<(u64, u64)> { // loop going up the tree, from node to parent, as long as we stay inside - // the tree (as defined by last_pos). - let (peak_map, height) = peak_map_height(pos - 1); + // the tree (as defined by size). + let (peak_map, height) = peak_map_height(pos0); let mut peak = 1 << height; let mut branch = vec![]; - let mut current = pos; + let mut current = pos0; let mut sibling; - while current < last_pos { + while current + 1 < size { if (peak_map & peak) != 0 { current += 1; sibling = current - 2 * peak; @@ -598,7 +657,7 @@ pub fn family_branch(pos: u64, last_pos: u64) -> Vec<(u64, u64)> { current += 2 * peak; sibling = current - 1; }; - if current > last_pos { + if current >= size { break; } branch.push((current, sibling)); @@ -608,12 +667,40 @@ pub fn family_branch(pos: u64, last_pos: u64) -> Vec<(u64, u64)> { } /// Gets the position of the rightmost node (i.e. leaf) beneath the provided subtree root. -pub fn bintree_rightmost(num: u64) -> u64 { - num - bintree_postorder_height(num) +pub fn bintree_rightmost(pos0: u64) -> u64 { + pos0 - bintree_postorder_height(pos0) } -/// Gets the position of the rightmost node (i.e. leaf) beneath the provided subtree root. -pub fn bintree_leftmost(num: u64) -> u64 { - let height = bintree_postorder_height(num); - num + 2 - (2 << height) +/// Gets the position of the leftmost node (i.e. leaf) beneath the provided subtree root. +pub fn bintree_leftmost(pos0: u64) -> u64 { + let height = bintree_postorder_height(pos0); + pos0 + 2 - (2 << height) +} + +/// Iterator over all leaf pos beneath the provided subtree root (including the root itself). 
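// family_branch() above walks from a node up to its peak, emitting
// (parent, sibling) pairs; the siblings are exactly the path that
// merkle_proof() collects. Copied verbatim, plus the loop advance
// (`peak <<= 1;` after the push) that falls between the excerpt's hunks,
// and spot-checked on the 7-node MMR:
const ALL_ONES: u64 = u64::MAX;

fn peak_map_height(mut size: u64) -> (u64, u64) {
	if size == 0 {
		return (0, 0);
	}
	let mut peak_size = ALL_ONES >> size.leading_zeros();
	let mut peak_map = 0;
	while peak_size != 0 {
		peak_map <<= 1;
		if size >= peak_size {
			size -= peak_size;
			peak_map |= 1;
		}
		peak_size >>= 1;
	}
	(peak_map, size)
}

fn family_branch(pos0: u64, size: u64) -> Vec<(u64, u64)> {
	let (peak_map, height) = peak_map_height(pos0);
	let mut peak = 1 << height;
	let mut branch = vec![];
	let mut current = pos0;
	let mut sibling;
	while current + 1 < size {
		if (peak_map & peak) != 0 {
			current += 1;
			sibling = current - 2 * peak;
		} else {
			current += 2 * peak;
			sibling = current - 1;
		};
		if current >= size {
			break;
		}
		branch.push((current, sibling));
		peak <<= 1;
	}
	branch
}

fn main() {
	// Leaf 0 climbs to parent 2 (sibling 1), then the root 6 (sibling 5).
	assert_eq!(family_branch(0, 7), vec![(2, 1), (6, 5)]);
	// Leaf 1 is a right sibling, so its first proof element is leaf 0.
	assert_eq!(family_branch(1, 7), vec![(2, 0), (6, 5)]);
	// A peak has no parents inside the tree.
	assert!(family_branch(6, 7).is_empty());
}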
+pub fn bintree_leaf_pos_iter(pos0: u64) -> Box> { + let leaf_start = pmmr_leaf_to_insertion_index(bintree_leftmost(pos0)); + let leaf_end = pmmr_leaf_to_insertion_index(bintree_rightmost(pos0)); + let leaf_start = match leaf_start { + Some(l) => l, + None => return Box::new(iter::empty::()), + }; + let leaf_end = match leaf_end { + Some(l) => l, + None => return Box::new(iter::empty::()), + }; + Box::new((leaf_start..=leaf_end).map(|n| insertion_to_pmmr_index(n))) +} + +/// Iterator over all pos beneath the provided subtree root (including the root itself). +pub fn bintree_pos_iter(pos0: u64) -> impl Iterator { + let leaf_start = bintree_leftmost(pos0); + (leaf_start..=pos0).into_iter() +} + +/// All pos in the subtree beneath the provided root, including root itself. +pub fn bintree_range(pos0: u64) -> Range { + let height = bintree_postorder_height(pos0); + let leftmost = pos0 + 2 - (2 << height); + leftmost..(pos0 + 1) } diff --git a/core/src/core/pmmr/readonly_pmmr.rs b/core/src/core/pmmr/readonly_pmmr.rs index de2f24495f..b9f720e520 100644 --- a/core/src/core/pmmr/readonly_pmmr.rs +++ b/core/src/core/pmmr/readonly_pmmr.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,10 +16,10 @@ use std::marker; -use crate::core::hash::{Hash, ZERO_HASH}; -use crate::core::pmmr::pmmr::{bintree_rightmost, peaks}; +use crate::core::hash::Hash; +use crate::core::pmmr::pmmr::{bintree_rightmost, ReadablePMMR}; use crate::core::pmmr::{is_leaf, Backend}; -use crate::ser::{PMMRIndexHashable, PMMRable}; +use crate::ser::PMMRable; /// Readonly view of a PMMR. pub struct ReadonlyPMMR<'a, T, B> @@ -28,7 +28,7 @@ where B: Backend, { /// The last position in the PMMR - last_pos: u64, + size: u64, /// The backend for this readonly PMMR backend: &'a B, // only needed to parameterise Backend @@ -44,153 +44,140 @@ where pub fn new(backend: &'a B) -> ReadonlyPMMR<'_, T, B> { ReadonlyPMMR { backend, - last_pos: 0, + size: 0, _marker: marker::PhantomData, } } /// Build a new readonly PMMR pre-initialized to - /// last_pos with the provided backend. - pub fn at(backend: &'a B, last_pos: u64) -> ReadonlyPMMR<'_, T, B> { + /// size with the provided backend. + pub fn at(backend: &'a B, size: u64) -> ReadonlyPMMR<'_, T, B> { ReadonlyPMMR { backend, - last_pos, + size, _marker: marker::PhantomData, } } - /// Get the data element at provided position in the MMR. - pub fn get_data(&self, pos: u64) -> Option { - if pos > self.last_pos { - // If we are beyond the rhs of the MMR return None. - None - } else if is_leaf(pos) { - // If we are a leaf then get data from the backend. - self.backend.get_data(pos) - } else { - // If we are not a leaf then return None as only leaves have data. - None - } - } - - /// Get the hash at provided position in the MMR. - pub fn get_hash(&self, pos: u64) -> Option { - if pos > self.last_pos { - None - } else if is_leaf(pos) { - // If we are a leaf then get hash from the backend. - self.backend.get_hash(pos) - } else { - // If we are not a leaf get hash ignoring the remove log. - self.backend.get_from_file(pos) - } - } - - /// Get the hash from the underlying MMR file, ignoring the leafset. - /// Some entries may have been removed from the leafset but not yet pruned from the file. 
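// The subtree helpers above are pure arithmetic on 0-based postorder
// positions: a root at pos n with height h spans the 2^(h+1) - 1 positions
// ending at n. Spot checks against the 7-node MMR (root 6 of height 2,
// right subtree root 5 of height 1):
fn main() {
	// bintree_leftmost(6) = 6 + 2 - (2 << 2) = 0
	assert_eq!(6u64 + 2 - (2 << 2), 0);
	// bintree_rightmost(6) = 6 - height(6) = 4, the last leaf under the root
	assert_eq!(6u64 - 2, 4);
	// bintree_range(6) is the whole tree: positions 0..7
	assert_eq!((6u64 + 2 - (2 << 2))..(6u64 + 1), 0u64..7);
	// bintree_range(5) covers leaves 3, 4 and their parent: positions 3..6
	assert_eq!((5u64 + 2 - (2 << 1))..(5u64 + 1), 3u64..6);
}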
- pub fn get_from_file(&self, pos: u64) -> Option { - if pos > self.last_pos { - None - } else { - self.backend.get_from_file(pos) - } - } - - /// Iterator over current (unpruned, unremoved) leaf positions. - pub fn leaf_pos_iter(&self) -> impl Iterator + '_ { - self.backend.leaf_pos_iter() - } - - /// Iterator over current (unpruned, unremoved) leaf insertion indices. - pub fn leaf_idx_iter(&self, from_idx: u64) -> impl Iterator + '_ { - self.backend.leaf_idx_iter(from_idx) - } - - /// Is the MMR empty? - pub fn is_empty(&self) -> bool { - self.last_pos == 0 - } - - /// Computes the root of the MMR. Find all the peaks in the current - /// tree and "bags" them to get a single peak. - pub fn root(&self) -> Hash { - if self.is_empty() { - return ZERO_HASH; - } - let mut res = None; - for peak in self.peaks().iter().rev() { - res = match res { - None => Some(*peak), - Some(rhash) => Some((*peak, rhash).hash_with_index(self.unpruned_size())), - } - } - res.expect("no root, invalid tree") - } - - /// Returns a vec of the peaks of this MMR. - pub fn peaks(&self) -> Vec { - let peaks_pos = peaks(self.last_pos); - peaks_pos - .into_iter() - .filter_map(|pi| { - // here we want to get from underlying hash file - // as the pos *may* have been "removed" - self.backend.get_from_file(pi) - }) - .collect() - } - - /// Total size of the tree, including intermediary nodes and ignoring any - /// pruning. - pub fn unpruned_size(&self) -> u64 { - self.last_pos - } - /// Helper function which returns un-pruned nodes from the insertion index /// forward /// returns last pmmr index returned along with data pub fn elements_from_pmmr_index( &self, - mut pmmr_index: u64, + pmmr_index1: u64, max_count: u64, - max_pmmr_pos: Option, + max_pmmr_pos1: Option, ) -> (u64, Vec) { let mut return_vec = vec![]; - let last_pos = match max_pmmr_pos { + let size = match max_pmmr_pos1 { Some(p) => p, - None => self.last_pos, + None => self.size, }; - if pmmr_index == 0 { - pmmr_index = 1; - } - while return_vec.len() < max_count as usize && pmmr_index <= last_pos { + let mut pmmr_index = pmmr_index1.saturating_sub(1); + + while return_vec.len() < max_count as usize && pmmr_index < size { if let Some(t) = self.get_data(pmmr_index) { return_vec.push(t); } pmmr_index += 1; } - (pmmr_index.saturating_sub(1), return_vec) + (pmmr_index, return_vec) } /// Helper function to get the last N nodes inserted, i.e. the last /// n nodes along the bottom of the tree. /// May return less than n items if the MMR has been pruned/compacted. + /// NOTE This should just iterate over insertion indices + /// to avoid the repeated calls to bintree_rightmost! pub fn get_last_n_insertions(&self, n: u64) -> Vec<(Hash, T::E)> { let mut return_vec = vec![]; - let mut last_leaf = self.last_pos; - for _ in 0..n as u64 { - if last_leaf == 0 { - break; - } - last_leaf = bintree_rightmost(last_leaf); + let mut last_leaf = self.size; + while return_vec.len() < n as usize && last_leaf > 0 { + last_leaf = bintree_rightmost(last_leaf - 1); if let Some(hash) = self.backend.get_hash(last_leaf) { if let Some(data) = self.backend.get_data(last_leaf) { return_vec.push((hash, data)); } } - last_leaf -= 1; } return_vec } } + +impl<'a, T, B> ReadablePMMR for ReadonlyPMMR<'a, T, B> +where + T: PMMRable, + B: 'a + Backend, +{ + type Item = T::E; + + fn get_hash(&self, pos0: u64) -> Option { + if pos0 >= self.size { + None + } else if is_leaf(pos0) { + // If we are a leaf then get hash from the backend. 
+ self.backend.get_hash(pos0) + } else { + // If we are not a leaf get hash ignoring the remove log. + self.backend.get_from_file(pos0) + } + } + + fn get_data(&self, pos0: u64) -> Option { + if pos0 >= self.size { + // If we are beyond the rhs of the MMR return None. + None + } else if is_leaf(pos0) { + // If we are a leaf then get data from the backend. + self.backend.get_data(pos0) + } else { + // If we are not a leaf then return None as only leaves have data. + None + } + } + + fn get_from_file(&self, pos0: u64) -> Option { + if pos0 >= self.size { + None + } else { + self.backend.get_from_file(pos0) + } + } + + fn get_peak_from_file(&self, pos0: u64) -> Option { + if pos0 >= self.size { + None + } else { + self.backend.get_peak_from_file(pos0) + } + } + + fn get_data_from_file(&self, pos0: u64) -> Option { + if pos0 >= self.size { + None + } else { + self.backend.get_data_from_file(pos0) + } + } + + fn unpruned_size(&self) -> u64 { + self.size + } + + fn leaf_pos_iter(&self) -> Box + '_> { + self.backend.leaf_pos_iter() + } + + fn leaf_idx_iter(&self, from_idx: u64) -> Box + '_> { + self.backend.leaf_idx_iter(from_idx) + } + + fn n_unpruned_leaves(&self) -> u64 { + self.backend.n_unpruned_leaves() + } + + fn n_unpruned_leaves_to_index(&self, to_index: u64) -> u64 { + self.backend.n_unpruned_leaves_to_index(to_index) + } +} diff --git a/core/src/core/pmmr/rewindable_pmmr.rs b/core/src/core/pmmr/rewindable_pmmr.rs index 1e33c6fa35..3610d082c4 100644 --- a/core/src/core/pmmr/rewindable_pmmr.rs +++ b/core/src/core/pmmr/rewindable_pmmr.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,9 +17,8 @@ use std::marker; -use crate::core::hash::{Hash, ZERO_HASH}; -use crate::core::pmmr::{bintree_postorder_height, is_leaf, peaks, Backend}; -use crate::ser::{PMMRIndexHashable, PMMRable}; +use crate::core::pmmr::{round_up_to_leaf_pos, Backend, ReadonlyPMMR}; +use crate::ser::PMMRable; /// Rewindable (but still readonly) view of a PMMR. pub struct RewindablePMMR<'a, T, B> @@ -49,11 +48,6 @@ where } } - /// Reference to the underlying storage backend. - pub fn backend(&'a self) -> &dyn Backend { - self.backend - } - /// Build a new readonly PMMR pre-initialized to /// last_pos with the provided backend. pub fn at(backend: &'a B, last_pos: u64) -> RewindablePMMR<'_, T, B> { @@ -70,66 +64,14 @@ where // Identify which actual position we should rewind to as the provided // position is a leaf. We traverse the MMR to include any parent(s) that // need to be included for the MMR to be valid. - let mut pos = position; - while bintree_postorder_height(pos + 1) > 0 { - pos += 1; - } - - self.last_pos = pos; + self.last_pos = round_up_to_leaf_pos(position); Ok(()) } - /// Get the data element at provided position in the MMR. - pub fn get_data(&self, pos: u64) -> Option { - if pos > self.last_pos { - // If we are beyond the rhs of the MMR return None. - None - } else if is_leaf(pos) { - // If we are a leaf then get data from the backend. - self.backend.get_data(pos) - } else { - // If we are not a leaf then return None as only leaves have data. - None - } - } - - /// Is the MMR empty? - pub fn is_empty(&self) -> bool { - self.last_pos == 0 - } - - /// Computes the root of the MMR. Find all the peaks in the current - /// tree and "bags" them to get a single peak. 
- pub fn root(&self) -> Result { - if self.is_empty() { - return Ok(ZERO_HASH); - } - let mut res = None; - for peak in self.peaks().iter().rev() { - res = match res { - None => Some(*peak), - Some(rhash) => Some((*peak, rhash).hash_with_index(self.unpruned_size())), - } - } - res.ok_or_else(|| "no root, invalid tree".to_owned()) - } - - /// Returns a vec of the peaks of this MMR. - pub fn peaks(&self) -> Vec { - let peaks_pos = peaks(self.last_pos); - peaks_pos - .into_iter() - .filter_map(|pi| { - // here we want to get from underlying hash file - // as the pos *may* have been "removed" - self.backend.get_from_file(pi) - }) - .collect() - } - - /// Total size of the tree, including intermediary nodes and ignoring any - /// pruning. - pub fn unpruned_size(&self) -> u64 { - self.last_pos + /// Allows conversion of a "rewindable" PMMR into a "readonly" PMMR. + /// Intended usage is to create a rewindable PMMR, rewind it, + /// then convert to "readonly" and read from it. + pub fn as_readonly(&self) -> ReadonlyPMMR<'a, T, B> { + ReadonlyPMMR::at(&self.backend, self.last_pos) } } diff --git a/core/src/core/pmmr/segment.rs b/core/src/core/pmmr/segment.rs new file mode 100644 index 0000000000..c66845c98c --- /dev/null +++ b/core/src/core/pmmr/segment.rs @@ -0,0 +1,792 @@ +// Copyright 2021 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Segment of a PMMR. 
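// round_up_to_leaf_pos(), used by the rewind logic above, maps an arbitrary
// position to the next leaf boundary, i.e. the smallest valid MMR size that
// still contains every parent hashed so far. It replaces the old "bump while
// the next node is a parent" loop. Copied verbatim with its dependencies and
// spot-checked:
const ALL_ONES: u64 = u64::MAX;

fn peak_map_height(mut size: u64) -> (u64, u64) {
	if size == 0 {
		return (0, 0);
	}
	let mut peak_size = ALL_ONES >> size.leading_zeros();
	let mut peak_map = 0;
	while peak_size != 0 {
		peak_map <<= 1;
		if size >= peak_size {
			size -= peak_size;
			peak_map |= 1;
		}
		peak_size >>= 1;
	}
	(peak_map, size)
}

fn insertion_to_pmmr_index(nleaf0: u64) -> u64 {
	2 * nleaf0 - nleaf0.count_ones() as u64
}

fn round_up_to_leaf_pos(pos0: u64) -> u64 {
	let (insert_idx, height) = peak_map_height(pos0);
	let leaf_idx = if height == 0 { insert_idx } else { insert_idx + 1 };
	insertion_to_pmmr_index(leaf_idx)
}

fn main() {
	assert_eq!(round_up_to_leaf_pos(0), 0); // already a leaf boundary
	assert_eq!(round_up_to_leaf_pos(2), 3); // pos 2 is a parent; next leaf is 3
	assert_eq!(round_up_to_leaf_pos(6), 7); // past the size-7 root -> pos 7
}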
+ +use crate::core::hash::Hash; +use crate::core::pmmr::{self, Backend, ReadablePMMR, ReadonlyPMMR}; +use crate::ser::{Error, PMMRIndexHashable, PMMRable, Readable, Reader, Writeable, Writer}; +use croaring::Bitmap; +use std::cmp::min; +use std::fmt; +use std::fmt::{Debug, Display}; + +#[derive(Clone, Debug, Eq, PartialEq)] +/// Possible segment types, according to this desegmenter +pub enum SegmentType { + /// Output Bitmap + Bitmap, + /// Output + Output, + /// RangeProof + RangeProof, + /// Kernel + Kernel, +} + +/// Lumps possible types with segment ids to enable a unique identifier +/// for a segment with respect to a particular archive header +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct SegmentTypeIdentifier { + /// The type of this segment + pub segment_type: SegmentType, + /// The identfier itself + pub identifier: SegmentIdentifier, +} + +impl SegmentTypeIdentifier { + /// Create + pub fn new(segment_type: SegmentType, identifier: SegmentIdentifier) -> Self { + Self { + segment_type, + identifier, + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, thiserror::Error)] +/// Error related to segment creation or validation +pub enum SegmentError { + /// An expected leaf was missing + #[error("Missing leaf at pos {0}")] + MissingLeaf(u64), + /// An expected hash was missing + #[error("Missing hash at pos {0}")] + MissingHash(u64), + /// The segment does not exist + #[error("Segment does not exist")] + NonExistent, + /// Mismatch between expected and actual root hash + #[error("Root hash mismatch")] + Mismatch, +} + +/// Tuple that defines a segment of a given PMMR +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct SegmentIdentifier { + /// Height of a segment + pub height: u8, + /// Zero-based index of the segment + pub idx: u64, +} + +impl Display for SegmentIdentifier { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "(h:{}, idx:{})", self.height, self.idx) + } +} + +impl Readable for SegmentIdentifier { + fn read(reader: &mut R) -> Result { + let height = reader.read_u8()?; + let idx = reader.read_u64()?; + Ok(Self { height, idx }) + } +} + +impl Writeable for SegmentIdentifier { + fn write(&self, writer: &mut W) -> Result<(), Error> { + writer.write_u8(self.height)?; + writer.write_u64(self.idx) + } +} + +impl SegmentIdentifier { + /// Test helper to get an iterator of SegmentIdentifiers required to read a + /// pmmr of size `target_mmr_size` in segments of height `segment_height` + pub fn traversal_iter( + target_mmr_size: u64, + segment_height: u8, + ) -> impl Iterator { + (0..SegmentIdentifier::count_segments_required(target_mmr_size, segment_height)).map( + move |idx| SegmentIdentifier { + height: segment_height, + idx: idx as u64, + }, + ) + } + + /// Returns number of segments required that would needed in order to read a + /// pmmr of size `target_mmr_size` in segments of height `segment_height` + pub fn count_segments_required(target_mmr_size: u64, segment_height: u8) -> usize { + let d = 1 << segment_height; + ((pmmr::n_leaves(target_mmr_size) + d - 1) / d) as usize + } + + /// Return pmmr size of number of segments of the given height + pub fn pmmr_size(num_segments: usize, height: u8) -> u64 { + pmmr::insertion_to_pmmr_index(num_segments as u64 * (1 << height)) + } + + /// Maximum number of leaves in a segment, given by `2**height` + pub fn segment_capacity(&self) -> u64 { + 1 << self.height + } + + /// Offset (in leaf idx) of first leaf in the segment + fn leaf_offset(&self) -> u64 { + self.idx * self.segment_capacity() + } + + // 
Number of leaves in this segment. Equal to capacity except for the final segment, which can be smaller + fn segment_unpruned_size(&self, mmr_size: u64) -> u64 { + min( + self.segment_capacity(), + pmmr::n_leaves(mmr_size).saturating_sub(self.leaf_offset()), + ) + } + + /// Inclusive (full) range of MMR positions for the segment that would be produced + /// by this Identifier + pub fn segment_pos_range(&self, mmr_size: u64) -> (u64, u64) { + let segment_size = self.segment_unpruned_size(mmr_size); + let leaf_offset = self.leaf_offset(); + let first = pmmr::insertion_to_pmmr_index(leaf_offset); + let last = if self.full_segment(mmr_size) { + pmmr::insertion_to_pmmr_index(leaf_offset + segment_size - 1) + (self.height as u64) + } else { + mmr_size - 1 + }; + (first, last) + } + + /// Whether the segment is full (segment size == capacity) + fn full_segment(&self, mmr_size: u64) -> bool { + self.segment_unpruned_size(mmr_size) == self.segment_capacity() + } +} + +/// Segment of a PMMR: unpruned leaves and the necessary data to verify +/// segment membership in the original MMR. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Segment { + identifier: SegmentIdentifier, + hash_pos: Vec, + hashes: Vec, + leaf_pos: Vec, + leaf_data: Vec, + proof: SegmentProof, +} + +impl Segment { + /// Creates an empty segment + fn empty(identifier: SegmentIdentifier) -> Self { + Segment { + identifier, + hash_pos: Vec::new(), + hashes: Vec::new(), + leaf_pos: Vec::new(), + leaf_data: Vec::new(), + proof: SegmentProof::empty(), + } + } + + /// Maximum number of leaves in a segment, given by `2**height` + fn _segment_capacity(&self) -> u64 { + self.identifier.segment_capacity() + } + + /// Offset (in leaf idx) of first leaf in the segment + fn _leaf_offset(&self) -> u64 { + self.identifier.leaf_offset() + } + + // Number of leaves in this segment. Equal to capacity except for the final segment, which can be smaller + fn segment_unpruned_size(&self, mmr_size: u64) -> u64 { + self.identifier.segment_unpruned_size(mmr_size) + } + + /// Whether the segment is full (segment size == capacity) + fn full_segment(&self, mmr_size: u64) -> bool { + self.identifier.full_segment(mmr_size) + } + + /// Inclusive range of MMR positions for this segment + pub fn segment_pos_range(&self, mmr_size: u64) -> (u64, u64) { + self.identifier.segment_pos_range(mmr_size) + } + + /// TODO - binary_search_by_key() here (can we assume these are sorted by pos?) 
+ fn get_hash(&self, pos0: u64) -> Result { + self.hash_pos + .iter() + .zip(&self.hashes) + .find(|&(&p, _)| p == pos0) + .map(|(_, &h)| h) + .ok_or_else(|| SegmentError::MissingHash(pos0)) + } + + /// Get the identifier associated with this segment + pub fn identifier(&self) -> SegmentIdentifier { + self.identifier + } + + /// Consume the segment and return its parts + pub fn parts( + self, + ) -> ( + SegmentIdentifier, + Vec, + Vec, + Vec, + Vec, + SegmentProof, + ) { + ( + self.identifier, + self.hash_pos, + self.hashes, + self.leaf_pos, + self.leaf_data, + self.proof, + ) + } + + /// Construct a segment from its parts + pub fn from_parts( + identifier: SegmentIdentifier, + hash_pos: Vec, + hashes: Vec, + leaf_pos: Vec, + leaf_data: Vec, + proof: SegmentProof, + ) -> Self { + assert_eq!(hash_pos.len(), hashes.len()); + let mut last = 0; + for &pos in &hash_pos { + assert!(last == 0 || pos > last); + last = pos; + } + assert_eq!(leaf_pos.len(), leaf_data.len()); + last = 0; + for &pos in &leaf_pos { + assert!(last == 0 || pos > last); + last = pos; + } + + Self { + identifier, + hash_pos, + hashes, + leaf_pos, + leaf_data, + proof, + } + } + + /// Iterator of all the leaves in the segment + pub fn leaf_iter(&self) -> impl Iterator + '_ { + self.leaf_pos.iter().map(|&p| p).zip(&self.leaf_data) + } + + /// Iterator of all the hashes in the segment + pub fn hash_iter(&self) -> impl Iterator + '_ { + self.hash_pos + .iter() + .zip(&self.hashes) + .map(|(&p, &h)| (p, h)) + } + + /// Segment proof + pub fn proof(&self) -> &SegmentProof { + &self.proof + } + + /// Segment identifier + pub fn id(&self) -> SegmentIdentifier { + self.identifier + } +} + +impl Segment +where + T: Readable + Writeable + Debug, +{ + /// Generate a segment from a PMMR + pub fn from_pmmr( + segment_id: SegmentIdentifier, + pmmr: &ReadonlyPMMR<'_, U, B>, + prunable: bool, + ) -> Result + where + U: PMMRable, + B: Backend, + { + let mut segment = Segment::empty(segment_id); + + let mmr_size = pmmr.unpruned_size(); + if segment.segment_unpruned_size(mmr_size) == 0 { + return Err(SegmentError::NonExistent); + } + + // Fill leaf data and hashes + let (segment_first_pos, segment_last_pos) = segment.segment_pos_range(mmr_size); + for pos0 in segment_first_pos..=segment_last_pos { + if pmmr::is_leaf(pos0) { + if let Some(data) = pmmr.get_data_from_file(pos0) { + segment.leaf_data.push(data); + segment.leaf_pos.push(pos0); + continue; + } else if !prunable { + return Err(SegmentError::MissingLeaf(pos0)); + } + } + // TODO: optimize, no need to send every intermediary hash + if prunable { + if let Some(hash) = pmmr.get_from_file(pos0) { + segment.hashes.push(hash); + segment.hash_pos.push(pos0); + } + } + } + + let mut start_pos = None; + // Fully pruned segment: only include a single hash, the first unpruned parent + if segment.leaf_data.is_empty() && segment.hashes.is_empty() { + let family_branch = pmmr::family_branch(segment_last_pos, mmr_size); + for (pos0, _) in family_branch { + if let Some(hash) = pmmr.get_from_file(pos0) { + segment.hashes.push(hash); + segment.hash_pos.push(pos0); + start_pos = Some(1 + pos0); + break; + } + } + } + + // Segment merkle proof + segment.proof = SegmentProof::generate( + pmmr, + mmr_size, + 1 + segment_first_pos, + 1 + segment_last_pos, + start_pos, + )?; + + Ok(segment) + } +} + +impl Segment +where + T: PMMRIndexHashable, +{ + /// Calculate root hash of this segment + /// Returns `None` iff the segment is full and completely pruned + pub fn root( + &self, + mmr_size: u64, + bitmap: 
Option<&Bitmap>, + ) -> Result, SegmentError> { + let (segment_first_pos, segment_last_pos) = self.segment_pos_range(mmr_size); + let mut hashes = Vec::>::with_capacity(2 * (self.identifier.height as usize)); + let mut leaves0 = self.leaf_pos.iter().zip(&self.leaf_data); + for pos0 in segment_first_pos..=segment_last_pos { + let height = pmmr::bintree_postorder_height(pos0); + let hash = if height == 0 { + // Leaf + if bitmap + .map(|b| { + let idx_1 = pmmr::n_leaves(pos0 + 1) - 1; + let idx_2 = if pmmr::is_left_sibling(pos0) { + idx_1 + 1 + } else { + idx_1 - 1 + }; + b.contains(idx_1 as u32) || b.contains(idx_2 as u32) || pos0 == mmr_size - 1 + }) + .unwrap_or(true) + { + // We require the data of this leaf if either the mmr is not prunable or if + // the bitmap indicates it (or its sibling) should be here. + // Edge case: if the final segment has an uneven number of leaves, we + // require the last leaf to be present regardless of the status in the bitmap. + // TODO: possibly remove requirement on the sibling when we no longer support + // syncing through the txhashset.zip method. + let data = leaves0 + .find(|&(&p, _)| p == pos0) + .map(|(_, l)| l) + .ok_or_else(|| SegmentError::MissingLeaf(pos0))?; + Some(data.hash_with_index(pos0)) + } else { + None + } + } else { + let left_child_pos = 1 + pos0 - (1 << height); + let right_child_pos = pos0; + + let right_child = hashes.pop().unwrap(); + let left_child = hashes.pop().unwrap(); + + if bitmap.is_some() { + // Prunable MMR + match (left_child, right_child) { + (None, None) => None, + (Some(l), Some(r)) => Some((l, r).hash_with_index(pos0)), + (None, Some(r)) => { + let l = self.get_hash(left_child_pos - 1)?; + Some((l, r).hash_with_index(pos0)) + } + (Some(l), None) => { + let r = self.get_hash(right_child_pos - 1)?; + Some((l, r).hash_with_index(pos0)) + } + } + } else { + // Non-prunable MMR: require both children + Some( + ( + left_child.ok_or_else(|| SegmentError::MissingHash(left_child_pos))?, + right_child + .ok_or_else(|| SegmentError::MissingHash(right_child_pos))?, + ) + .hash_with_index(pos0), + ) + } + }; + hashes.push(hash); + } + + if self.full_segment(mmr_size) { + // Full segment: last position of segment is subtree root + Ok(hashes.pop().unwrap()) + } else { + // Not full (only final segment): peaks in segment, bag them together + let peaks = pmmr::peaks(mmr_size) + .into_iter() + .filter(|&pos0| pos0 >= segment_first_pos && pos0 <= segment_last_pos) + .rev(); + let mut hash = None; + for pos0 in peaks { + let mut lhash = hashes + .pop() + .ok_or_else(|| SegmentError::MissingHash(1 + pos0))?; + if lhash.is_none() && bitmap.is_some() { + // If this entire peak is pruned, load it from the segment hashes + lhash = Some(self.get_hash(pos0)?); + } + let lhash = lhash.ok_or_else(|| SegmentError::MissingHash(1 + pos0))?; + + hash = match hash { + None => Some(lhash), + Some(rhash) => Some((lhash, rhash).hash_with_index(mmr_size)), + }; + } + Ok(Some(hash.unwrap())) + } + } + + /// Get the first 1-based (sucks) unpruned parent hash of this segment + pub fn first_unpruned_parent( + &self, + mmr_size: u64, + bitmap: Option<&Bitmap>, + ) -> Result<(Hash, u64), SegmentError> { + let root = self.root(mmr_size, bitmap)?; + let (_, last) = self.segment_pos_range(mmr_size); + if let Some(root) = root { + return Ok((root, 1 + last)); + } + let bitmap = bitmap.unwrap(); + let n_leaves = pmmr::n_leaves(mmr_size); + + let mut cardinality = 0; + let mut pos0 = last; + let mut hash = Err(SegmentError::MissingHash(last)); + let mut 
family_branch = pmmr::family_branch(last, mmr_size).into_iter(); + while cardinality == 0 { + hash = self.get_hash(pos0).map(|h| (h, 1 + pos0)); + if hash.is_ok() { + // Return early in case a lower level hash is already present + // This can occur if both child trees are pruned but compaction hasn't run yet + return hash; + } + + if let Some((p0, _)) = family_branch.next() { + pos0 = p0; + let range = (pmmr::n_leaves(1 + pmmr::bintree_leftmost(p0)) - 1) as u32 + ..min(pmmr::n_leaves(1 + pmmr::bintree_rightmost(p0)), n_leaves) as u32; + cardinality = bitmap.range_cardinality(range); + } else { + break; + } + } + hash + } + + /// Check validity of the segment by calculating its root and validating the merkle proof + pub fn validate( + &self, + mmr_size: u64, + bitmap: Option<&Bitmap>, + mmr_root: Hash, + ) -> Result<(), SegmentError> { + let (first, last) = self.segment_pos_range(mmr_size); + let (segment_root, segment_unpruned_pos) = self.first_unpruned_parent(mmr_size, bitmap)?; + self.proof.validate( + mmr_size, + mmr_root, + first, + last, + segment_root, + segment_unpruned_pos, + ) + } +} + +impl Readable for Segment { + fn read(reader: &mut R) -> Result { + let identifier = Readable::read(reader)?; + + let n_hashes = reader.read_u64()? as usize; + let mut hash_pos = Vec::with_capacity(n_hashes); + let mut last_pos = 0; + for _ in 0..n_hashes { + let pos = reader.read_u64()?; + if pos <= last_pos { + return Err(Error::SortError); + } + last_pos = pos; + hash_pos.push(pos - 1); + } + + let mut hashes = Vec::::with_capacity(n_hashes); + for _ in 0..n_hashes { + hashes.push(Readable::read(reader)?); + } + + let n_leaves = reader.read_u64()? as usize; + let mut leaf_pos = Vec::with_capacity(n_leaves); + last_pos = 0; + for _ in 0..n_leaves { + let pos = reader.read_u64()?; + if pos <= last_pos { + return Err(Error::SortError); + } + last_pos = pos; + leaf_pos.push(pos - 1); + } + + let mut leaf_data = Vec::::with_capacity(n_leaves); + for _ in 0..n_leaves { + leaf_data.push(Readable::read(reader)?); + } + + let proof = Readable::read(reader)?; + + Ok(Self { + identifier, + hash_pos, + hashes, + leaf_pos, + leaf_data, + proof, + }) + } +} + +impl Writeable for Segment { + fn write(&self, writer: &mut W) -> Result<(), Error> { + Writeable::write(&self.identifier, writer)?; + writer.write_u64(self.hashes.len() as u64)?; + for &pos in &self.hash_pos { + writer.write_u64(1 + pos)?; + } + for hash in &self.hashes { + Writeable::write(hash, writer)?; + } + writer.write_u64(self.leaf_data.len() as u64)?; + for &pos in &self.leaf_pos { + writer.write_u64(1 + pos)?; + } + for data in &self.leaf_data { + Writeable::write(data, writer)?; + } + Writeable::write(&self.proof, writer)?; + Ok(()) + } +} + +/// Merkle proof of a segment +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct SegmentProof { + hashes: Vec, +} + +impl SegmentProof { + fn empty() -> Self { + Self { hashes: Vec::new() } + } + + fn generate( + pmmr: &ReadonlyPMMR<'_, U, B>, + last_pos: u64, + segment_first_pos: u64, + segment_last_pos: u64, + start_pos: Option, + ) -> Result + where + U: PMMRable, + B: Backend, + { + let family_branch = pmmr::family_branch(segment_last_pos - 1, last_pos); + + // 1. siblings along the path from the subtree root to the peak + let hashes: Result, _> = family_branch + .iter() + .filter(|&&(p0, _)| start_pos.map(|s| p0 >= s).unwrap_or(true)) + .map(|&(_, s0)| { + pmmr.get_hash(s0) + .ok_or_else(|| SegmentError::MissingHash(s0)) + }) + .collect(); + let mut proof = Self { hashes: hashes? }; + + // 2. 
bagged peaks to the right + let peak_pos = family_branch + .last() + .map(|&(p0, _)| p0) + .unwrap_or(segment_last_pos - 1); + if let Some(h) = pmmr.bag_the_rhs(peak_pos) { + proof.hashes.push(h); + } + + // 3. peaks to the left + let peaks: Result, _> = pmmr::peaks(last_pos) + .into_iter() + .filter(|&x| 1 + x < segment_first_pos) + .rev() + .map(|p| pmmr.get_hash(p).ok_or_else(|| SegmentError::MissingHash(p))) + .collect(); + proof.hashes.extend(peaks?); + + Ok(proof) + } + + /// Size of the proof in hashes. + pub fn size(&self) -> usize { + self.hashes.len() + } + + /// Reconstruct PMMR root using this proof + pub fn reconstruct_root( + &self, + last_pos: u64, + segment_first_pos0: u64, + segment_last_pos0: u64, + segment_root: Hash, + segment_unpruned_pos: u64, + ) -> Result { + let mut iter = self.hashes.iter(); + let family_branch = pmmr::family_branch(segment_last_pos0, last_pos); + + // 1. siblings along the path from the subtree root to the peak + let mut root = segment_root; + for &(p0, s0) in family_branch + .iter() + .filter(|&&(p0, _)| p0 >= segment_unpruned_pos) + { + let sibling_hash = iter + .next() + .ok_or_else(|| SegmentError::MissingHash(1 + s0))?; + root = if pmmr::is_left_sibling(s0) { + (sibling_hash, root).hash_with_index(p0) + } else { + (root, sibling_hash).hash_with_index(p0) + }; + } + + // 2. bagged peaks to the right + let peak_pos0 = family_branch + .last() + .map(|&(p0, _)| p0) + .unwrap_or(segment_last_pos0); + + let rhs = pmmr::peaks(last_pos) + .into_iter() + .filter(|&x| x > peak_pos0) + .next(); + + if let Some(pos0) = rhs { + root = ( + root, + iter.next() + .ok_or_else(|| SegmentError::MissingHash(1 + pos0))?, + ) + .hash_with_index(last_pos) + } + + // 3. peaks to the left + let peaks = pmmr::peaks(last_pos) + .into_iter() + .filter(|&x| x < segment_first_pos0) + .rev(); + for pos0 in peaks { + root = ( + iter.next() + .ok_or_else(|| SegmentError::MissingHash(1 + pos0))?, + root, + ) + .hash_with_index(last_pos); + } + + Ok(root) + } + + /// Check validity of the proof by equating the reconstructed root with the actual root + pub fn validate( + &self, + last_pos: u64, + mmr_root: Hash, + segment_first_pos: u64, + segment_last_pos: u64, + segment_root: Hash, + segment_unpruned_pos: u64, + ) -> Result<(), SegmentError> { + let root = self.reconstruct_root( + last_pos, + segment_first_pos, + segment_last_pos, + segment_root, + segment_unpruned_pos, + )?; + if root == mmr_root { + Ok(()) + } else { + Err(SegmentError::Mismatch) + } + } +} + +impl Readable for SegmentProof { + fn read(reader: &mut R) -> Result { + let n_hashes = reader.read_u64()? as usize; + let mut hashes = Vec::with_capacity(n_hashes); + for _ in 0..n_hashes { + let hash: Hash = Readable::read(reader)?; + hashes.push(hash); + } + Ok(Self { hashes }) + } +} + +impl Writeable for SegmentProof { + fn write(&self, writer: &mut W) -> Result<(), Error> { + writer.write_u64(self.hashes.len() as u64)?; + for hash in &self.hashes { + Writeable::write(hash, writer)?; + } + Ok(()) + } +} diff --git a/core/src/core/pmmr/vec_backend.rs b/core/src/core/pmmr/vec_backend.rs index fea1dbe633..ee4629d0a0 100644 --- a/core/src/core/pmmr/vec_backend.rs +++ b/core/src/core/pmmr/vec_backend.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -35,39 +35,50 @@ pub struct VecBackend { } impl Backend for VecBackend { - fn append(&mut self, elmt: &T, hashes: Vec) -> Result<(), String> { + fn append(&mut self, elmt: &T, hashes: &[Hash]) -> Result<(), String> { if let Some(data) = &mut self.data { data.push(elmt.clone()); } - self.hashes.append(&mut hashes.clone()); + self.hashes.extend_from_slice(hashes); Ok(()) } - fn get_hash(&self, position: u64) -> Option { - if self.removed.contains(&position) { + fn append_pruned_subtree(&mut self, _hash: Hash, _pos0: u64) -> Result<(), String> { + unimplemented!() + } + + fn append_hash(&mut self, _hash: Hash) -> Result<(), String> { + unimplemented!() + } + + fn get_hash(&self, pos0: u64) -> Option { + if self.removed.contains(&pos0) { None } else { - self.get_from_file(position) + self.get_from_file(pos0) } } - fn get_data(&self, position: u64) -> Option { - if self.removed.contains(&position) { + fn get_data(&self, pos0: u64) -> Option { + if self.removed.contains(&pos0) { None } else { - self.get_data_from_file(position) + self.get_data_from_file(pos0) } } - fn get_from_file(&self, position: u64) -> Option { - let idx = usize::try_from(position.saturating_sub(1)).expect("usize from u64"); + fn get_from_file(&self, pos0: u64) -> Option { + let idx = usize::try_from(pos0).expect("usize from u64"); self.hashes.get(idx).cloned() } - fn get_data_from_file(&self, position: u64) -> Option { + fn get_peak_from_file(&self, pos0: u64) -> Option { + self.get_from_file(pos0) + } + + fn get_data_from_file(&self, pos0: u64) -> Option { if let Some(data) = &self.data { - let idx = usize::try_from(pmmr::n_leaves(position).saturating_sub(1)) - .expect("usize from u64"); + let idx = usize::try_from(pmmr::n_leaves(1 + pos0) - 1).expect("usize from u64"); data.get(idx).map(|x| x.as_elmt()) } else { None @@ -79,30 +90,43 @@ impl Backend for VecBackend { unimplemented!() } + fn n_unpruned_leaves_to_index(&self, _to_index: u64) -> u64 { + unimplemented!() + } + fn leaf_pos_iter(&self) -> Box + '_> { Box::new( self.hashes .iter() .enumerate() - .map(|(x, _)| (x + 1) as u64) + .map(|(x, _)| x as u64) .filter(move |x| pmmr::is_leaf(*x) && !self.removed.contains(x)), ) } + /// NOTE this function is needlessly inefficient with repeated calls to n_leaves() fn leaf_idx_iter(&self, from_idx: u64) -> Box + '_> { - let from_pos = pmmr::insertion_to_pmmr_index(from_idx + 1); + let from_pos = pmmr::insertion_to_pmmr_index(from_idx); Box::new( self.leaf_pos_iter() .skip_while(move |x| *x < from_pos) - .map(|x| pmmr::n_leaves(x).saturating_sub(1)), + .map(|x| pmmr::n_leaves(x + 1) - 1), ) } - fn remove(&mut self, position: u64) -> Result<(), String> { - self.removed.insert(position); + fn remove(&mut self, pos0: u64) -> Result<(), String> { + self.removed.insert(pos0); Ok(()) } + fn remove_from_leaf_set(&mut self, _pos0: u64) { + unimplemented!() + } + + fn reset_prune_list(&mut self) { + unimplemented!() + } + fn rewind(&mut self, position: u64, _rewind_rm_pos: &Bitmap) -> Result<(), String> { if let Some(data) = &mut self.data { let idx = pmmr::n_leaves(position); diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs index e4afbcf1f3..c4fef8c250 100644 --- a/core/src/core/transaction.rs +++ b/core/src/core/transaction.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ //! 
Transactions
+use crate::core::block::HeaderVersion;
 use crate::core::hash::{DefaultHashable, Hashed};
-use crate::core::verifier_cache::VerifierCache;
 use crate::core::{committed, Committed};
 use crate::libtx::{aggsig, secp_ser};
 use crate::ser::{
@@ -25,17 +25,194 @@ use crate::ser::{
 use crate::{consensus, global};
 use enum_primitive::FromPrimitive;
 use keychain::{self, BlindingFactor};
+use serde::de;
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
 use std::cmp::Ordering;
 use std::cmp::{max, min};
 use std::convert::{TryFrom, TryInto};
-use std::sync::Arc;
+use std::fmt;
+use std::fmt::Display;
 use util;
 use util::secp;
 use util::secp::pedersen::{Commitment, RangeProof};
 use util::static_secp_instance;
-use util::RwLock;
 use util::ToHex;
 
+/// Fee fields as in fix-fees RFC: { future_use: 20, fee_shift: 4, fee: 40 }
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub struct FeeFields(u64);
+
+impl DefaultHashable for FeeFields {}
+
+impl Writeable for FeeFields {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
+		writer.write_u64(self.0)
+	}
+}
+
+impl Readable for FeeFields {
+	fn read<R: Reader>(reader: &mut R) -> Result<Self, ser::Error> {
+		let fee_fields = reader.read_u64()?;
+		Ok(Self(fee_fields))
+	}
+}
+
+impl Display for FeeFields {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+		write!(f, "{}", self.0)
+	}
+}
+
+impl Serialize for FeeFields {
+	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+	where
+		S: Serializer,
+	{
+		serializer.collect_str(&self.0)
+	}
+}
+
+impl<'de> Deserialize<'de> for FeeFields {
+	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+	where
+		D: Deserializer<'de>,
+	{
+		struct FeeFieldsVisitor;
+		impl<'de> de::Visitor<'de> for FeeFieldsVisitor {
+			type Value = FeeFields;
+
+			fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+				formatter.write_str("a 64-bit integer")
+			}
+
+			fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
+			where
+				E: de::Error,
+			{
+				let value = value
+					.parse()
+					.map_err(|_| E::custom(format!("invalid fee field")))?;
+				self.visit_u64(value)
+			}
+
+			fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
+			where
+				E: de::Error,
+			{
+				Ok(FeeFields(value))
+			}
+		}
+
+		deserializer.deserialize_any(FeeFieldsVisitor)
+	}
+}
+
+/// Conversion from a valid fee to a FeeFields with 0 fee_shift
+/// The valid fee range is 1..FEE_MASK
+impl TryFrom<u64> for FeeFields {
+	type Error = Error;
+
+	fn try_from(fee: u64) -> Result<Self, Self::Error> {
+		if fee == 0 {
+			Err(Error::InvalidFeeFields(format!("fee is zero")))
+		} else if fee > FeeFields::FEE_MASK {
+			Err(Error::InvalidFeeFields(format!("fee {} is too high", fee)))
+		} else {
+			Ok(Self(fee))
+		}
+	}
+}
+
+/// Conversion from a 32-bit fee to a FeeFields with 0 fee_shift
+/// For use exclusively in tests with constant fees
+impl From<u32> for FeeFields {
+	fn from(fee: u32) -> Self {
+		Self(fee as u64)
+	}
+}
+
+impl From<FeeFields> for u64 {
+	fn from(fee_fields: FeeFields) -> Self {
+		fee_fields.0 as u64
+	}
+}
+
+impl FeeFields {
+	/// Fees are limited to 40 bits
+	const FEE_BITS: u32 = 40;
+	/// Used to extract fee field
+	const FEE_MASK: u64 = (1u64 << FeeFields::FEE_BITS) - 1;
+
+	/// Fee shifts are limited to 4 bits
+	pub const FEE_SHIFT_BITS: u32 = 4;
+	/// Used to extract fee_shift field
+	pub const FEE_SHIFT_MASK: u64 = (1u64 << FeeFields::FEE_SHIFT_BITS) - 1;
+
+	/// Create a zero FeeFields with 0 fee and 0 fee_shift
+	pub fn zero() -> Self {
+		Self(0)
+	}
+
+	/// Create a new FeeFields from the provided shift and fee
+	/// Checks both are valid (in range)
+	pub fn new(fee_shift: u64, fee: u64) -> Result<Self, Error> {
+		if fee == 0 {
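+			// Worked example of the packing below (hypothetical values): with the
+			// layout { future_use: 20, fee_shift: 4, fee: 40 }, FeeFields::new(1, 500_000)
+			// stores (1u64 << 40) | 500_000, while any zero fee is rejected right here.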
+			Err(Error::InvalidFeeFields(format!("fee is zero")))
+		} else if fee > FeeFields::FEE_MASK {
+			Err(Error::InvalidFeeFields(format!("fee {} is too high", fee)))
+		} else if fee_shift > FeeFields::FEE_SHIFT_MASK {
+			Err(Error::InvalidFeeFields(format!(
+				"fee_shift {} is too high",
+				fee_shift
+			)))
+		} else {
+			Ok(Self((fee_shift << FeeFields::FEE_BITS) | fee))
+		}
+	}
+
+	/// Extract fee_shift field
+	pub fn fee_shift(&self, height: u64) -> u8 {
+		// MWC NOTE: header version 3 is a future hardfork; we don't know yet when it will happen.
+		if consensus::header_version(height) < HeaderVersion(3) {
+			0
+		} else {
+			((self.0 >> FeeFields::FEE_BITS) & FeeFields::FEE_SHIFT_MASK) as u8
+		}
+	}
+
+	/// Extract fee field
+	pub fn fee(&self, height: u64) -> u64 {
+		// MWC NOTE: header version 3 is a future hardfork; we don't know yet when it will happen.
+		if consensus::header_version(height) < HeaderVersion(3) {
+			self.0
+		} else {
+			self.0 & FeeFields::FEE_MASK
+		}
+	}
+
+	/// Turn a zero `FeeField` into a `None`, any other value into a `Some`.
+	/// We need this because a zero `FeeField` cannot be deserialized.
+	pub fn as_opt(&self) -> Option<Self> {
+		if self.is_zero() {
+			None
+		} else {
+			Some(*self)
+		}
+	}
+
+	/// Check if the `FeeFields` is set to zero
+	pub fn is_zero(&self) -> bool {
+		self.0 == 0
+	}
+}
+
+fn fee_fields_as_int<S>(fee_fields: &FeeFields, serializer: S) -> Result<S::Ok, S::Error>
+where
+	S: Serializer,
+{
+	serializer.serialize_u64(fee_fields.0)
+}
+
 /// Relative height field on NRD kernel variant.
 /// u16 representing a height between 1 and MAX (consensus::WEEK_HEIGHT).
 #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
@@ -64,12 +241,11 @@ impl TryFrom<u16> for NRDRelativeHeight {
 	type Error = Error;
 
 	fn try_from(height: u16) -> Result<Self, Self::Error> {
-		if height == 0 {
-			Err(Error::InvalidNRDRelativeHeight)
-		} else if height
-			> NRDRelativeHeight::MAX
-				.try_into()
-				.expect("WEEK_HEIGHT const should fit in u16")
+		if height == 0
+			|| height
+				> NRDRelativeHeight::MAX
+					.try_into()
+					.expect("WEEK_HEIGHT const should fit in u16")
 		{
 			Err(Error::InvalidNRDRelativeHeight)
 		} else {
@@ -108,21 +284,24 @@ pub enum KernelFeatures {
 	/// Plain kernel (the default for Grin txs).
 	Plain {
 		/// Plain kernels have fees.
-		fee: u64,
+		#[serde(serialize_with = "fee_fields_as_int")]
+		fee: FeeFields,
 	},
 	/// A coinbase kernel.
 	Coinbase,
 	/// A kernel with an explicit lock height (and fee).
 	HeightLocked {
 		/// Height locked kernels have fees.
-		fee: u64,
+		#[serde(serialize_with = "fee_fields_as_int")]
+		fee: FeeFields,
 		/// Height locked kernels have lock heights.
		lock_height: u64,
 	},
 	/// "No Recent Duplicate" (NRD) kernels enforcing relative lock height between instances.
 	NoRecentDuplicate {
 		/// These have fees.
-		fee: u64,
+		#[serde(serialize_with = "fee_fields_as_int")]
+		fee: FeeFields,
 		/// Relative lock height.
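+		// Construction sketch (hypothetical values): a valid NRD variant could be
+		// built as
+		//   KernelFeatures::NoRecentDuplicate {
+		//       fee: FeeFields::new(0, 1_000)?,
+		//       relative_height: NRDRelativeHeight::try_from(1_440)?,
+		//   }
+		// since try_from rejects a height of 0 or above MAX (consensus::WEEK_HEIGHT).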
 		relative_height: NRDRelativeHeight,
 	},
@@ -155,10 +334,10 @@ impl KernelFeatures {
 		}
 	}
 
-	/// msg = hash(features) for coinbase kernels
-	///       hash(features || fee) for plain kernels
-	///       hash(features || fee || lock_height) for height locked kernels
-	///       hash(features || fee || relative_height) for NRD kernels
+	/// msg = hash(features) for coinbase kernels
+	///       hash(features || fee_fields) for plain kernels
+	///       hash(features || fee_fields || lock_height) for height locked kernels
+	///       hash(features || fee_fields || relative_height) for NRD kernels
 	pub fn kernel_sig_msg(&self) -> Result<secp::Message, Error> {
 		let x = self.as_u8();
 		let hash = match self {
@@ -176,38 +355,39 @@ impl KernelFeatures {
 	}
 
 	/// Get paid fee for this kernel
-	pub fn get_fee(&self) -> u64 {
+	/// Pessimistic because the returned value can be higher than the real fee if the shift is not 0.
+	/// For our use case that is fine: there is no transaction priority involved.
+	pub fn get_fee_pessimistic(&self) -> u64 {
 		match self {
-			KernelFeatures::Plain { fee } => fee,
-			KernelFeatures::Coinbase => &0,
+			KernelFeatures::Plain { fee } => fee.0,
+			KernelFeatures::Coinbase => 0,
 			KernelFeatures::HeightLocked {
 				fee,
 				lock_height: _,
-			} => fee,
+			} => fee.0,
 			KernelFeatures::NoRecentDuplicate {
 				fee,
 				relative_height: _,
-			} => fee,
+			} => fee.0,
 		}
-		.clone()
 	}
 
 	/// Write tx kernel features out in v1 protocol format.
-	/// Always include the fee and lock_height, writing 0 value if unused.
+	/// Always include the fee_fields and lock_height, writing 0 value if unused.
 	fn write_v1<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
 		writer.write_u8(self.as_u8())?;
 		match self {
 			KernelFeatures::Plain { fee } => {
-				writer.write_u64(*fee)?;
+				fee.write(writer)?;
 				// Write "empty" bytes for feature specific data (8 bytes).
 				writer.write_empty_bytes(8)?;
 			}
 			KernelFeatures::Coinbase => {
-				// Write "empty" bytes for fee (8 bytes) and feature specific data (8 bytes).
+				// Write "empty" bytes for fee_fields (8 bytes) and feature specific data (8 bytes).
 				writer.write_empty_bytes(16)?;
 			}
 			KernelFeatures::HeightLocked { fee, lock_height } => {
-				writer.write_u64(*fee)?;
+				fee.write(writer)?;
 				// 8 bytes of feature specific data containing the lock height as big-endian u64.
 				writer.write_u64(*lock_height)?;
 			}
@@ -215,7 +395,7 @@ impl KernelFeatures {
 				fee,
 				relative_height,
 			} => {
-				writer.write_u64(*fee)?;
+				fee.write(writer)?;
 
 				// 8 bytes of feature specific data. First 6 bytes are empty.
 				// Last 2 bytes contain the relative lock height as big-endian u16.
@@ -230,20 +410,20 @@ impl KernelFeatures {
 	/// Write tx kernel features out in v2 protocol format.
 	/// These are variable sized based on feature variant.
-	/// Only write fee out for feature variants that support it.
+	/// Only write fee_fields out for feature variants that support it.
 	/// Only write lock_height out for feature variants that support it.
 	fn write_v2<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
 		writer.write_u8(self.as_u8())?;
 		match self {
 			KernelFeatures::Plain { fee } => {
 				// Fee only, no additional data on plain kernels.
-				writer.write_u64(*fee)?;
+				fee.write(writer)?;
 			}
 			KernelFeatures::Coinbase => {
 				// No additional data.
 			}
 			KernelFeatures::HeightLocked { fee, lock_height } => {
-				writer.write_u64(*fee)?;
+				fee.write(writer)?;
 				// V2 height locked kernels use 8 bytes for the lock height.
 				writer.write_u64(*lock_height)?;
 			}
@@ -251,7 +431,7 @@ impl KernelFeatures {
 				fee,
 				relative_height,
 			} => {
-				writer.write_u64(*fee)?;
+				fee.write(writer)?;
 				// V2 NRD kernels use 2 bytes for the relative lock height.
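+				// Resulting sizes, read off the two writers (a sketch, not normative):
+				// v1 always emits 17 bytes per kernel (1 feature byte + 8 fee_fields
+				// bytes + 8 bytes of feature specific data), while v2 is variable:
+				// Plain = 1 + 8, Coinbase = 1, HeightLocked = 1 + 8 + 8, NRD = 1 + 8 + 2.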
relative_height.write(writer)?; } @@ -259,7 +439,7 @@ impl KernelFeatures { Ok(()) } - // Always read feature byte, 8 bytes for fee and 8 bytes for additional data + // Always read feature byte, 8 bytes for fee_fields and 8 bytes for additional data // representing lock height or relative height. // Fee and additional data may be unused for some kernel variants but we need // to read these bytes and verify they are 0 if unused. @@ -267,19 +447,19 @@ impl KernelFeatures { let feature_byte = reader.read_u8()?; let features = match feature_byte { KernelFeatures::PLAIN_U8 => { - let fee = reader.read_u64()?; + let fee = FeeFields::read(reader)?; // 8 "empty" bytes as additional data is not used. reader.read_empty_bytes(8)?; KernelFeatures::Plain { fee } } KernelFeatures::COINBASE_U8 => { - // 8 "empty" bytes as fee is not used. + // 8 "empty" bytes as fee_fields is not used. // 8 "empty" bytes as additional data is not used. reader.read_empty_bytes(16)?; KernelFeatures::Coinbase } KernelFeatures::HEIGHT_LOCKED_U8 => { - let fee = reader.read_u64()?; + let fee = FeeFields::read(reader)?; // 8 bytes of feature specific data, lock height as big-endian u64. let lock_height = reader.read_u64()?; KernelFeatures::HeightLocked { fee, lock_height } @@ -290,7 +470,7 @@ impl KernelFeatures { return Err(ser::Error::CorruptedData("NRD is disabled".to_string())); } - let fee = reader.read_u64()?; + let fee = FeeFields::read(reader)?; // 8 bytes of feature specific data. // The first 6 bytes must be "empty". @@ -317,12 +497,12 @@ impl KernelFeatures { fn read_v2(reader: &mut R) -> Result { let features = match reader.read_u8()? { KernelFeatures::PLAIN_U8 => { - let fee = reader.read_u64()?; + let fee = FeeFields::read(reader)?; KernelFeatures::Plain { fee } } KernelFeatures::COINBASE_U8 => KernelFeatures::Coinbase, KernelFeatures::HEIGHT_LOCKED_U8 => { - let fee = reader.read_u64()?; + let fee = FeeFields::read(reader)?; let lock_height = reader.read_u64()?; KernelFeatures::HeightLocked { fee, lock_height } } @@ -332,7 +512,7 @@ impl KernelFeatures { return Err(ser::Error::CorruptedData("NRD is disabled".to_string())); } - let fee = reader.read_u64()?; + let fee = FeeFields::read(reader)?; let relative_height = NRDRelativeHeight::read(reader)?; KernelFeatures::NoRecentDuplicate { fee, @@ -377,58 +557,61 @@ impl Readable for KernelFeatures { } /// Errors thrown by Transaction validation -#[derive(Fail, Clone, Eq, Debug, PartialEq, Serialize, Deserialize)] +#[derive(thiserror::Error, Clone, Eq, Debug, PartialEq, Serialize, Deserialize)] pub enum Error { /// Underlying Secp256k1 error (signature validation or invalid public key /// typically) - #[fail(display = "Secp256k1 error, {}", _0)] + #[error("Secp256k1 error, {0}")] Secp(secp::Error), /// Underlying keychain related error - #[fail(display = "Keychain error, {}", _0)] + #[error("Keychain error, {0}")] Keychain(keychain::Error), /// The sum of output minus input commitments does not /// match the sum of kernel commitments - #[fail(display = "Tx Kernel Sum Mismatch")] + #[error("Tx Kernel Sum Mismatch")] KernelSumMismatch, /// Restrict tx total weight. 
- #[fail(display = "Tx total weight too heavy")] + #[error("Tx total weight too heavy")] TooHeavy, /// Error originating from an invalid lock-height - #[fail(display = "Tx Invalid lock height {}", _0)] + #[error("Tx Invalid lock height {0}")] LockHeight(u64), /// Range proof validation error - #[fail(display = "Tx Invalid range proof")] + #[error("Tx Invalid range proof")] RangeProof, /// Error originating from an invalid Merkle proof - #[fail(display = "Tx Invalid Merkle Proof")] + #[error("Tx Invalid Merkle Proof")] MerkleProof, /// Returns if the value hidden within the a RangeProof message isn't /// repeated 3 times, indicating it's incorrect - #[fail(display = "Tx Invalid Proof Message")] + #[error("Tx Invalid Proof Message")] InvalidProofMessage, /// Error when verifying kernel sums via committed trait. - #[fail(display = "Tx Verifying kernel sums error, {}", _0)] + #[error("Tx Verifying kernel sums error, {0}")] Committed(committed::Error), /// Validation error relating to cut-through (tx is spending its own /// output). - #[fail(display = "Tx cut through error")] + #[error("Tx cut through error")] CutThrough, /// Validation error relating to output features. /// It is invalid for a transaction to contain a coinbase output, for example. - #[fail(display = "Tx Invalid output feature")] + #[error("Tx Invalid output feature")] InvalidOutputFeatures, /// Validation error relating to kernel features. /// It is invalid for a transaction to contain a coinbase kernel, for example. - #[fail(display = "Tx Invalid kernel feature")] + #[error("Tx Invalid kernel feature")] InvalidKernelFeatures, + /// feeshift is limited to 4 bits and fee must be positive and fit in 40 bits. + #[error("Invalid Fee Fields, {0}")] + InvalidFeeFields(String), /// NRD kernel relative height is limited to 1 week duration and must be greater than 0. - #[fail(display = "Invalid NRD kernel relative height")] + #[error("Invalid NRD kernel relative height")] InvalidNRDRelativeHeight, /// Signature verification error. - #[fail(display = "Tx Invalid signature")] + #[error("Tx Invalid signature")] IncorrectSignature, /// Underlying serialization error. - #[fail(display = "Tx Serialization error, {}", _0)] + #[error("Tx Serialization error, {0}")] Serialization(ser::Error), } @@ -459,7 +642,7 @@ impl From for Error { /// A proof that a transaction sums to zero. Includes both the transaction's /// Pedersen commitment and the signature, that guarantees that the commitments /// amount to zero. -/// The signature signs the fee and the lock_height, which are retained for +/// The signature signs the fee_fields and the lock_height, which are retained for /// signature validation. #[derive(Serialize, Deserialize, Debug, Clone, Copy)] pub struct TxKernel { @@ -474,7 +657,7 @@ pub struct TxKernel { )] pub excess: Commitment, /// The signature proving the excess is a valid public key, which signs - /// the transaction fee. + /// the transaction fee_fields. #[serde(with = "secp_ser::sig_serde")] pub excess_sig: secp::Signature, } @@ -586,21 +769,21 @@ impl TxKernel { } /// The msg signed as part of the tx kernel. - /// Based on kernel features and associated fields (fee and lock_height). + /// Based on kernel features and associated fields (fee_fields and lock_height). pub fn msg_to_sign(&self) -> Result { let msg = self.features.kernel_sig_msg()?; Ok(msg) } /// Verify the transaction proof validity. 
Entails handling the commitment - /// as a public key and checking the signature verifies with the fee as + /// as a public key and checking the signature verifies with the fee_fields as /// message. pub fn verify(&self) -> Result<(), Error> { let secp = static_secp_instance(); let secp = secp.lock(); let sig = &self.excess_sig; // Verify aggsig directly in libsecp - let pubkey = &self.excess.to_pubkey()?; + let pubkey = &self.excess.to_pubkey(&secp)?; if !aggsig::verify_single( &secp, &sig, @@ -627,7 +810,7 @@ impl TxKernel { for tx_kernel in tx_kernels { sigs.push(tx_kernel.excess_sig); - pubkeys.push(tx_kernel.excess.to_pubkey()?); + pubkeys.push(tx_kernel.excess.to_pubkey(&secp)?); msgs.push(tx_kernel.msg_to_sign()?); } @@ -640,7 +823,9 @@ impl TxKernel { /// Build an empty tx kernel with zero values. pub fn empty() -> TxKernel { - TxKernel::with_features(KernelFeatures::Plain { fee: 0 }) + TxKernel::with_features(KernelFeatures::Plain { + fee: FeeFields::zero(), + }) } /// Build an empty tx kernel with the provided kernel features. @@ -723,8 +908,7 @@ impl Readable for TransactionBody { // Quick block weight check before proceeding. // Note: We use weight_as_block here (inputs have weight). - let tx_block_weight = - TransactionBody::weight_as_block(num_inputs, num_outputs, num_kernels); + let tx_block_weight = TransactionBody::weight_by_iok(num_inputs, num_outputs, num_kernels); if num_inputs > ser::READ_VEC_SIZE_LIMIT || num_outputs > ser::READ_VEC_SIZE_LIMIT @@ -912,7 +1096,7 @@ impl TransactionBody { } /// Total fee for a TransactionBody is the sum of fees of all fee carrying kernels. - pub fn fee(&self) -> u64 { + pub fn fee(&self, height: u64) -> u64 { self.kernels .iter() .filter_map(|k| match k.features { @@ -921,49 +1105,57 @@ impl TransactionBody { KernelFeatures::HeightLocked { fee, .. } => Some(fee), KernelFeatures::NoRecentDuplicate { fee, .. } => Some(fee), }) - .fold(0, |acc, fee| acc.saturating_add(fee)) + .fold(0, |acc, fee_fields| { + acc.saturating_add(fee_fields.fee(height)) + }) } - fn overage(&self) -> i64 { - self.fee() as i64 + /// fee_shift for a TransactionBody is the maximum of fee_shifts of all fee carrying kernels. + pub fn fee_shift(&self, height: u64) -> u8 { + self.kernels + .iter() + .filter_map(|k| match k.features { + KernelFeatures::Coinbase => None, + KernelFeatures::Plain { fee } => Some(fee), + KernelFeatures::HeightLocked { fee, .. } => Some(fee), + KernelFeatures::NoRecentDuplicate { fee, .. 
} => Some(fee), + }) + .fold(0, |acc, fee_fields| max(acc, fee_fields.fee_shift(height))) } - /// Calculate transaction weight - pub fn body_weight(&self) -> u64 { - TransactionBody::weight( - self.inputs.len() as u64, - self.outputs.len() as u64, - self.kernels.len() as u64, - ) + /// Shifted fee for a TransactionBody is the sum of fees shifted right by the maximum fee_shift + /// this is used to determine whether a tx can be relayed or accepted in a mempool + /// where transactions can specify a higher block-inclusion priority as a positive shift up to 15 + /// but are required to overpay the minimum required fees by a factor of 2^priority + pub fn shifted_fee(&self, height: u64) -> u64 { + self.fee(height) >> self.fee_shift(height) + } + + /// aggregate fee_fields from all appropriate kernels in TransactionBody into one, if possible + pub fn aggregate_fee_fields(&self, height: u64) -> Result { + FeeFields::new(self.fee_shift(height) as u64, self.fee(height)) + } + + fn overage(&self, height: u64) -> i64 { + self.fee(height) as i64 } /// Calculate weight of transaction using block weighing - pub fn body_weight_as_block(&self) -> u64 { - TransactionBody::weight_as_block( + pub fn weight(&self) -> u64 { + TransactionBody::weight_by_iok( self.inputs.len() as u64, self.outputs.len() as u64, self.kernels.len() as u64, ) } - /// Calculate transaction weight from transaction details. This is non - /// consensus critical and compared to block weight, incentivizes spending - /// more outputs (to lower the fee). - pub fn weight(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 { - let body_weight = num_outputs - .saturating_mul(4) - .saturating_add(num_kernels) - .saturating_sub(num_inputs); - max(body_weight, 1) - } - /// Calculate transaction weight using block weighing from transaction /// details. Consensus critical and uses consensus weight values. - pub fn weight_as_block(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 { + pub fn weight_by_iok(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 { num_inputs - .saturating_mul(consensus::BLOCK_INPUT_WEIGHT as u64) - .saturating_add(num_outputs.saturating_mul(consensus::BLOCK_OUTPUT_WEIGHT as u64)) - .saturating_add(num_kernels.saturating_mul(consensus::BLOCK_KERNEL_WEIGHT as u64)) + .saturating_mul(consensus::INPUT_WEIGHT as u64) + .saturating_add(num_outputs.saturating_mul(consensus::OUTPUT_WEIGHT as u64)) + .saturating_add(num_kernels.saturating_mul(consensus::KERNEL_WEIGHT as u64)) } /// Lock height of a body is the max lock height of the kernels. @@ -983,7 +1175,7 @@ impl TransactionBody { fn verify_weight(&self, weighting: Weighting) -> Result<(), Error> { // A coinbase reward is a single output and a single kernel (for now). // We need to account for this when verifying max tx weights. - let coinbase_weight = consensus::BLOCK_OUTPUT_WEIGHT + consensus::BLOCK_KERNEL_WEIGHT; + let coinbase_weight = consensus::OUTPUT_WEIGHT + consensus::KERNEL_WEIGHT; // If "tx" body then remember to reduce the max_block_weight by the weight of a kernel. // If "limited tx" then compare against the provided max_weight. @@ -994,7 +1186,7 @@ impl TransactionBody { // for the additional coinbase reward (1 output + 1 kernel). 
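+		// Concretely (assuming the usual consensus values OUTPUT_WEIGHT = 21 and
+		// KERNEL_WEIGHT = 3): coinbase_weight = 21 + 3 = 24, so a tx validated
+		// with Weighting::AsTransaction must leave room for a 24-unit coinbase.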
// let max_weight = match weighting { - Weighting::AsTransaction => global::max_block_weight().saturating_sub(coinbase_weight), + Weighting::AsTransaction => global::max_tx_weight(), Weighting::AsLimitedTransaction(max_weight) => { min(global::max_block_weight(), max_weight).saturating_sub(coinbase_weight) } @@ -1005,7 +1197,7 @@ impl TransactionBody { } }; - if self.body_weight_as_block() > max_weight { + if self.weight() > max_weight { return Err(Error::TooHeavy); } Ok(()) @@ -1113,11 +1305,7 @@ impl TransactionBody { /// Validates all relevant parts of a transaction body. Checks the /// excess value against the signature as well as range proofs for each /// output. - pub fn validate( - &self, - weighting: Weighting, - _verifier: Arc>, - ) -> Result<(), Error> { + pub fn validate(&self, weighting: Weighting) -> Result<(), Error> { self.validate_read(weighting)?; // Now batch verify all those unverified rangeproofs @@ -1291,13 +1479,23 @@ impl Transaction { } /// Total fee for a transaction is the sum of fees of all kernels. - pub fn fee(&self) -> u64 { - self.body.fee() + pub fn fee(&self, height: u64) -> u64 { + self.body.fee(height) + } + + /// Shifted fee for a transaction is the sum of fees of all kernels shifted right by the maximum fee shift + pub fn shifted_fee(&self, height: u64) -> u64 { + self.body.shifted_fee(height) + } + + /// aggregate fee_fields from all appropriate kernels in transaction into one + pub fn aggregate_fee_fields(&self, height: u64) -> Result { + self.body.aggregate_fee_fields(height) } /// Total overage across all kernels. - pub fn overage(&self) -> i64 { - self.body.overage() + pub fn overage(&self, height: u64) -> i64 { + self.body.overage(height) } /// Lock height of a transaction is the max lock height of the kernels. @@ -1319,36 +1517,51 @@ impl Transaction { /// Validates all relevant parts of a fully built transaction. Checks the /// excess value against the signature as well as range proofs for each /// output. - pub fn validate( - &self, - weighting: Weighting, - verifier: Arc>, - ) -> Result<(), Error> { + pub fn validate(&self, weighting: Weighting, height: u64) -> Result<(), Error> { self.body.verify_features()?; - self.body.validate(weighting, verifier)?; - self.verify_kernel_sums(self.overage(), self.offset.clone())?; + self.body.validate(weighting)?; + self.verify_kernel_sums(self.overage(height), self.offset.clone())?; Ok(()) } - /// Can be used to compare txs by their fee/weight ratio. + /// Can be used to compare txs by their fee/weight ratio, aka feerate. /// Don't use these values for anything else though due to precision multiplier. - pub fn fee_to_weight(&self) -> u64 { - self.fee() * 1_000 / self.tx_weight() as u64 + pub fn fee_rate(&self, height: u64) -> u64 { + self.fee(height) / self.weight() as u64 } /// Calculate transaction weight - pub fn tx_weight(&self) -> u64 { - self.body.body_weight() + pub fn weight(&self) -> u64 { + self.body.weight() + } + + /// Transaction minimum acceptable fee + pub fn accept_fee(&self, height: u64) -> u64 { + // Note MWC. Header Version 3 is future versions for the mainnet, + // This feature is related to miners only, there is no consensus breaking. 
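+		// A worked sketch of the two branches below (hypothetical tx, assuming the
+		// usual consensus weights INPUT_WEIGHT = 1, OUTPUT_WEIGHT = 21,
+		// KERNEL_WEIGHT = 3): a 1-input, 2-output, 1-kernel tx weighs
+		// 1 + 2 * 21 + 3 = 46, so post-HF3 its minimum acceptable fee is
+		// 46 * get_accept_fee_base(); pre-HF3 the old weight
+		// max(2 * 4 + 1 - 1, 1) = 8 is priced at 8 * MILLI_GRIN instead.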
+ if consensus::header_version(height) < HeaderVersion(3) { + Transaction::old_weight_by_iok( + self.body.inputs.len() as u64, + self.body.outputs.len() as u64, + self.body.kernels.len() as u64, + ) * consensus::MILLI_GRIN + } else { + self.weight() * global::get_accept_fee_base() + } } - /// Calculate transaction weight as a block - pub fn tx_weight_as_block(&self) -> u64 { - self.body.body_weight_as_block() + /// Old weight definition for pool acceptance + pub fn old_weight_by_iok(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 { + let body_weight = num_outputs + .saturating_mul(4) + .saturating_add(num_kernels) + .saturating_sub(num_inputs); + max(body_weight, 1) } /// Calculate transaction weight from transaction details - pub fn weight(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 { - TransactionBody::weight(num_inputs, num_outputs, num_kernels) + pub fn weight_by_iok(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 { + TransactionBody::weight_by_iok(num_inputs, num_outputs, num_kernels) } } @@ -1523,21 +1736,23 @@ pub fn deaggregate(mk_tx: Transaction, txs: &[Transaction]) -> Result>(); let negative_keys = kernel_offsets .into_iter() .filter(|x| *x != BlindingFactor::zero()) - .filter_map(|x| x.secret_key().ok()) + .filter_map(|x| x.secret_key(&secp).ok()) .collect::>(); if positive_key.is_empty() && negative_keys.is_empty() { BlindingFactor::zero() } else { - let sum = secp::Secp256k1::blind_sum(positive_key, negative_keys)?; + let sum = secp.blind_sum(positive_key, negative_keys)?; BlindingFactor::from_secret_key(sum) } }; @@ -2150,7 +2365,7 @@ mod test { let sig = secp::Signature::from_raw_data(&[0; 64]).unwrap(); let kernel = TxKernel { - features: KernelFeatures::Plain { fee: 10 }, + features: KernelFeatures::Plain { fee: 10.into() }, excess: commit, excess_sig: sig.clone(), }; @@ -2159,8 +2374,10 @@ mod test { for version in vec![ProtocolVersion(1), ProtocolVersion(2)] { let mut vec = vec![]; ser::serialize(&mut vec, version, &kernel).expect("serialized failed"); - let kernel2: TxKernel = ser::deserialize(&mut &vec[..], version).unwrap(); - assert_eq!(kernel2.features, KernelFeatures::Plain { fee: 10 }); + let kernel2: TxKernel = + ser::deserialize(&mut &vec[..], version, ser::DeserializationMode::default()) + .unwrap(); + assert_eq!(kernel2.features, KernelFeatures::Plain { fee: 10.into() }); assert_eq!(kernel2.excess, commit); assert_eq!(kernel2.excess_sig, sig.clone()); } @@ -2169,7 +2386,7 @@ mod test { let mut vec = vec![]; ser::serialize_default(&mut vec, &kernel).expect("serialized failed"); let kernel2: TxKernel = ser::deserialize_default(&mut &vec[..]).unwrap(); - assert_eq!(kernel2.features, KernelFeatures::Plain { fee: 10 }); + assert_eq!(kernel2.features, KernelFeatures::Plain { fee: 10.into() }); assert_eq!(kernel2.excess, commit); assert_eq!(kernel2.excess_sig, sig.clone()); } @@ -2188,7 +2405,7 @@ mod test { // now check a kernel with lock_height serialize/deserialize correctly let kernel = TxKernel { features: KernelFeatures::HeightLocked { - fee: 10, + fee: 10.into(), lock_height: 100, }, excess: commit, @@ -2199,7 +2416,9 @@ mod test { for version in vec![ProtocolVersion(1), ProtocolVersion(2)] { let mut vec = vec![]; ser::serialize(&mut vec, version, &kernel).expect("serialized failed"); - let kernel2: TxKernel = ser::deserialize(&mut &vec[..], version).unwrap(); + let kernel2: TxKernel = + ser::deserialize(&mut &vec[..], version, ser::DeserializationMode::default()) + .unwrap(); assert_eq!(kernel.features, 
kernel2.features); assert_eq!(kernel2.excess, commit); assert_eq!(kernel2.excess_sig, sig.clone()); @@ -2230,7 +2449,7 @@ mod test { // now check an NRD kernel will serialize/deserialize correctly let kernel = TxKernel { features: KernelFeatures::NoRecentDuplicate { - fee: 10, + fee: 10.into(), relative_height: NRDRelativeHeight(100), }, excess: commit, @@ -2241,7 +2460,9 @@ mod test { for version in vec![ProtocolVersion(1), ProtocolVersion(2)] { let mut vec = vec![]; ser::serialize(&mut vec, version, &kernel).expect("serialized failed"); - let kernel2: TxKernel = ser::deserialize(&mut &vec[..], version).unwrap(); + let kernel2: TxKernel = + ser::deserialize(&mut &vec[..], version, ser::DeserializationMode::default()) + .unwrap(); assert_eq!(kernel.features, kernel2.features); assert_eq!(kernel2.excess, commit); assert_eq!(kernel2.excess_sig, sig.clone()); @@ -2262,7 +2483,7 @@ mod test { let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate { - fee: 10, + fee: 10.into(), relative_height: NRDRelativeHeight(100), }); @@ -2275,7 +2496,7 @@ mod test { let skey = keychain .derive_key(0, &key_id, SwitchCommitmentType::Regular) .unwrap(); - let pubkey = excess.to_pubkey().unwrap(); + let pubkey = excess.to_pubkey(keychain.secp()).unwrap(); let excess_sig = aggsig::sign_single(&keychain.secp(), &msg, &skey, None, Some(&pubkey)).unwrap(); @@ -2288,25 +2509,25 @@ mod test { // Modify the fee and check signature no longer verifies. kernel.features = KernelFeatures::NoRecentDuplicate { - fee: 9, + fee: 9.into(), relative_height: NRDRelativeHeight(100), }; assert_eq!(kernel.verify(), Err(Error::IncorrectSignature)); // Modify the relative_height and check signature no longer verifies. kernel.features = KernelFeatures::NoRecentDuplicate { - fee: 10, + fee: 10.into(), relative_height: NRDRelativeHeight(101), }; assert_eq!(kernel.verify(), Err(Error::IncorrectSignature)); // Swap the features out for something different and check signature no longer verifies. - kernel.features = KernelFeatures::Plain { fee: 10 }; + kernel.features = KernelFeatures::Plain { fee: 10.into() }; assert_eq!(kernel.verify(), Err(Error::IncorrectSignature)); // Check signature verifies if we use the original features. kernel.features = KernelFeatures::NoRecentDuplicate { - fee: 10, + fee: 10.into(), relative_height: NRDRelativeHeight(100), }; assert_eq!(kernel.verify(), Ok(())); @@ -2367,7 +2588,7 @@ mod test { let mut vec = vec![]; ser::serialize_default(&mut vec, &(0u8, 10u64, 0u64))?; let features: KernelFeatures = ser::deserialize_default(&mut &vec[..])?; - assert_eq!(features, KernelFeatures::Plain { fee: 10 }); + assert_eq!(features, KernelFeatures::Plain { fee: 10.into() }); let mut vec = vec![]; ser::serialize_default(&mut vec, &(1u8, 0u64, 0u64))?; @@ -2380,7 +2601,7 @@ mod test { assert_eq!( features, KernelFeatures::HeightLocked { - fee: 10, + fee: 10.into(), lock_height: 100 } ); @@ -2420,7 +2641,7 @@ mod test { assert_eq!( features, KernelFeatures::NoRecentDuplicate { - fee: 10, + fee: 10.into(), relative_height: NRDRelativeHeight(100) } ); diff --git a/core/src/core/verifier_cache.rs b/core/src/core/verifier_cache.rs deleted file mode 100644 index 6e1a0836e9..0000000000 --- a/core/src/core/verifier_cache.rs +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2020 The Grin Developers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! VerifierCache trait for batch verifying outputs and kernels. -//! We pass a "caching verifier" into the block validation processing with this. - -use crate::core::hash::{Hash, Hashed}; -use crate::core::{Output, TxKernel}; -use lru_cache::LruCache; - -/// Verifier cache for caching expensive verification results. -/// Specifically the following - -/// * kernel signature verification -/// * output rangeproof verification -pub trait VerifierCache: Sync + Send { - /// Takes a vec of tx kernels and returns those kernels - /// that have not yet been verified. - fn filter_kernel_sig_unverified(&mut self, kernels: &[TxKernel]) -> Vec; - /// Takes a vec of tx outputs and returns those outputs - /// that have not yet had their rangeproofs verified. - fn filter_rangeproof_unverified(&mut self, outputs: &[Output]) -> Vec; - /// Adds a vec of tx kernels to the cache (used in conjunction with the the filter above). - fn add_kernel_sig_verified(&mut self, kernels: Vec); - /// Adds a vec of outputs to the cache (used in conjunction with the the filter above). - fn add_rangeproof_verified(&mut self, outputs: Vec); -} - -/// An implementation of verifier_cache using lru_cache. -/// Caches tx kernels by kernel hash. -/// Caches outputs by output rangeproof hash (rangeproofs are committed to separately). -pub struct LruVerifierCache { - kernel_sig_verification_cache: LruCache, - rangeproof_verification_cache: LruCache, -} - -impl LruVerifierCache { - /// TODO how big should these caches be? - /// They need to be *at least* large enough to cover a maxed out block. 
- pub fn new() -> LruVerifierCache { - LruVerifierCache { - kernel_sig_verification_cache: LruCache::new(50_000), - rangeproof_verification_cache: LruCache::new(50_000), - } - } -} - -impl VerifierCache for LruVerifierCache { - fn filter_kernel_sig_unverified(&mut self, kernels: &[TxKernel]) -> Vec { - let res = kernels - .iter() - .filter(|x| !self.kernel_sig_verification_cache.contains_key(&x.hash())) - .cloned() - .collect::>(); - trace!( - "lru_verifier_cache: kernel sigs: {}, not cached (must verify): {}", - kernels.len(), - res.len() - ); - res - } - - fn filter_rangeproof_unverified(&mut self, outputs: &[Output]) -> Vec { - let res = outputs - .iter() - .filter(|x| { - !self - .rangeproof_verification_cache - .contains_key(&x.proof.hash()) - }) - .cloned() - .collect::>(); - trace!( - "lru_verifier_cache: rangeproofs: {}, not cached (must verify): {}", - outputs.len(), - res.len() - ); - res - } - - fn add_kernel_sig_verified(&mut self, kernels: Vec) { - for k in kernels { - self.kernel_sig_verification_cache.insert(k.hash(), ()); - } - } - - fn add_rangeproof_verified(&mut self, outputs: Vec) { - for o in outputs { - self.rangeproof_verification_cache - .insert(o.proof.hash(), ()); - } - } -} diff --git a/core/src/genesis.rs b/core/src/genesis.rs index 91011c675e..77f388577f 100644 --- a/core/src/genesis.rs +++ b/core/src/genesis.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,7 +24,6 @@ use crate::core::hash::Hash; use crate::pow::{Difficulty, Proof, ProofOfWork}; use chrono::prelude::{TimeZone, Utc}; use keychain::BlindingFactor; -use util; use util::secp::constants::SINGLE_BULLET_PROOF_SIZE; use util::secp::pedersen::{Commitment, RangeProof}; use util::secp::Signature; @@ -35,7 +34,7 @@ use util::secp::Signature; pub fn genesis_dev() -> core::Block { core::Block::with_header(core::BlockHeader { height: 0, - timestamp: Utc.ymd(1997, 8, 4).and_hms(0, 0, 0), + timestamp: Utc.with_ymd_and_hms(1997, 8, 4, 0, 0, 0).unwrap(), pow: ProofOfWork { nonce: 0, ..Default::default() @@ -48,7 +47,7 @@ pub fn genesis_dev() -> core::Block { pub fn genesis_floo() -> core::Block { let gen = core::Block::with_header(core::BlockHeader { height: 0, - timestamp: Utc.ymd(2019, 5, 26).and_hms(16, 30, 1), + timestamp: Utc.with_ymd_and_hms(2019, 5, 26, 16, 30, 1).unwrap(), prev_root: Hash::from_hex( "000000000000000000257647fb29ce964ddf2b27c639ae60c4c90fafe5c42e53", ) @@ -161,7 +160,7 @@ pub fn genesis_floo() -> core::Block { pub fn genesis_main() -> core::Block { let gen = core::Block::with_header(core::BlockHeader { height: 0, - timestamp: Utc.ymd(2019, 11, 11).and_hms(9, 0, 0), + timestamp: Utc.with_ymd_and_hms(2019, 11, 11, 9, 0, 0).unwrap(), prev_root: Hash::from_hex( "00000000000000000004f2fb2ee749923a8131028aeae637070b0b4145617d42", ) diff --git a/core/src/global.rs b/core/src/global.rs index c1eb793fb4..c2d753de21 100644 --- a/core/src/global.rs +++ b/core/src/global.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,13 +16,16 @@ //! having to pass them all over the place, but aren't consensus values. //! should be used sparingly. 
+use crate::consensus;
 use crate::consensus::{
-	graph_weight, HeaderInfo, BASE_EDGE_BITS, BLOCK_KERNEL_WEIGHT, BLOCK_OUTPUT_WEIGHT,
-	BLOCK_TIME_SEC, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DAY_HEIGHT, DEFAULT_MIN_EDGE_BITS,
-	DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY, MAX_BLOCK_WEIGHT, PROOFSIZE,
+	graph_weight, HeaderDifficultyInfo, BASE_EDGE_BITS, BLOCK_TIME_SEC, COINBASE_MATURITY,
+	CUT_THROUGH_HORIZON, DAY_HEIGHT, DEFAULT_MIN_EDGE_BITS, DIFFICULTY_ADJUST_WINDOW,
+	INITIAL_DIFFICULTY, KERNEL_WEIGHT, MAX_BLOCK_WEIGHT, OUTPUT_WEIGHT, PROOFSIZE,
 	SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
 };
-use crate::pow::{self, new_cuckarood_ctx, new_cuckatoo_ctx, PoWContext};
+use crate::core::block::Block;
+use crate::genesis;
+use crate::pow::{self, new_cuckarood_ctx, new_cuckatoo_ctx, PoWContext, Proof};
 use crate::ser::ProtocolVersion;
 use std::cell::Cell;
 use std::sync::atomic::{AtomicBool, Ordering};
@@ -40,7 +43,7 @@ use util::OneTime;
 /// We negotiate compatible versions with each peer via Hand/Shake.
 /// Note: We also use a specific (possibly different) protocol version
 /// for both the backend database and MMR data files.
-/// NOTE, grin bump the protocol version to 1000, but in any case fo far 1,2,3 are supported.
+/// NOTE: grin bumped the protocol version to 1000, but in any case so far versions 1, 2 and 3 are supported.
 pub const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion(3);
 
 /// Automated testing edge_bits
@@ -79,6 +82,9 @@ pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;
 /// Testing max_block_weight (artificially low, just enough to support a few txs).
 pub const TESTING_MAX_BLOCK_WEIGHT: u64 = 250;
 
+/// Default unit of fee per tx weight, making each output cost about a Grincent
+pub const DEFAULT_ACCEPT_FEE_BASE: u64 = consensus::MILLI_GRIN; // Keeping default base is same, no changes for MWC GRIN_BASE / 100 / 20; // 500_000
+
 /// If a peer's last updated difficulty is 2 hours ago and its difficulty's lower than ours,
 /// we're sure this peer is a stuck node, and we will kick out such kind of stuck peers.
 pub const STUCK_PEER_KICK_TIME: i64 = 2 * 3600 * 1000;
@@ -163,6 +169,12 @@ lazy_static! {
 	/// to be overridden on a per-thread basis (for testing).
 	pub static ref GLOBAL_CHAIN_TYPE: OneTime<ChainTypes> = OneTime::new();
 
+	/// Global accept fee base that must be initialized once on node startup.
+	/// This is accessed via get_accept_fee_base() which allows the global value
+	/// to be overridden on a per-thread basis (for testing).
+	pub static ref GLOBAL_ACCEPT_FEE_BASE: OneTime<u64> = OneTime::new();
+
+
 	/// Global feature flag for NRD kernel support.
 	/// If enabled NRD kernels are treated as valid after HF3 (based on header version).
 	/// If disabled NRD kernels are invalid regardless of header version or block height.
@@ -177,10 +189,18 @@ thread_local! {
 	/// Mainnet|Floonet|UserTesting|AutomatedTesting
 	pub static CHAIN_TYPE: Cell<Option<ChainTypes>> = Cell::new(None);
 
+	/// minimum transaction fee per unit of transaction weight for mempool acceptance
+	pub static ACCEPT_FEE_BASE: Cell<Option<u64>> = Cell::new(None);
+
 	/// Local feature flag for NRD kernel support.
 	pub static NRD_FEATURE_ENABLED: Cell<Option<bool>> = Cell::new(None);
 }
 
+/// Set the global chain_type using an override
+pub fn set_global_chain_type(new_type: ChainTypes) {
+	GLOBAL_CHAIN_TYPE.set(new_type, true);
+}
+
 /// Set the chain type on a per-thread basis via thread_local storage.
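+// Intended two-level lookup, sketched (this mirrors the tests further down):
+// a node calls init_global_chain_type(ChainTypes::Mainnet) once at startup,
+// while a test thread overrides the value locally:
+//   set_local_chain_type(ChainTypes::AutomatedTesting);
+//   assert_eq!(get_chain_type(), ChainTypes::AutomatedTesting);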
pub fn set_local_chain_type(new_type: ChainTypes) { CHAIN_TYPE.with(|chain_type| chain_type.set(Some(new_type))) @@ -190,18 +210,26 @@ pub fn set_local_chain_type(new_type: ChainTypes) { pub fn get_chain_type() -> ChainTypes { CHAIN_TYPE.with(|chain_type| match chain_type.get() { None => { - if GLOBAL_CHAIN_TYPE.is_init() { - let chain_type = GLOBAL_CHAIN_TYPE.borrow(); - set_local_chain_type(chain_type); - chain_type - } else { + if !GLOBAL_CHAIN_TYPE.is_init() { panic!("GLOBAL_CHAIN_TYPE and CHAIN_TYPE unset. Consider set_local_chain_type() in tests."); } + let chain_type = GLOBAL_CHAIN_TYPE.borrow(); + set_local_chain_type(chain_type); + chain_type } Some(chain_type) => chain_type, }) } +/// Return genesis block for the active chain type +pub fn get_genesis_block() -> Block { + match get_chain_type() { + ChainTypes::Mainnet => genesis::genesis_main(), + ChainTypes::Floonet => genesis::genesis_floo(), + _ => genesis::genesis_dev(), + } +} + /// One time initialization of the global chain_type. /// Will panic if we attempt to re-initialize this (via OneTime). pub fn init_global_chain_type(new_type: ChainTypes) { @@ -214,6 +242,11 @@ pub fn init_global_nrd_enabled(enabled: bool) { GLOBAL_NRD_FEATURE_ENABLED.init(enabled) } +/// Set the global NRD feature flag using override. +pub fn set_global_nrd_enabled(enabled: bool) { + GLOBAL_NRD_FEATURE_ENABLED.set(enabled, true) +} + /// Explicitly enable the NRD global feature flag. pub fn set_local_nrd_enabled(enabled: bool) { NRD_FEATURE_ENABLED.with(|flag| flag.set(Some(enabled))) @@ -238,6 +271,40 @@ pub fn is_nrd_enabled() -> bool { }) } +/// One time initialization of the global accept fee base +/// Will panic if we attempt to re-initialize this (via OneTime). +pub fn init_global_accept_fee_base(new_base: u64) { + GLOBAL_ACCEPT_FEE_BASE.init(new_base) +} + +/// The global accept fee base may be reset using override. +pub fn set_global_accept_fee_base(new_base: u64) { + GLOBAL_ACCEPT_FEE_BASE.set(new_base, true) +} + +/// Set the accept fee base on a per-thread basis via thread_local storage. +pub fn set_local_accept_fee_base(new_base: u64) { + ACCEPT_FEE_BASE.with(|base| base.set(Some(new_base))) +} + +/// Accept Fee Base +/// Look at thread local config first. If not set fallback to global config. +/// Default to grin-cent/20 if global config unset. 
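+// Usage sketch (hypothetical values): a node would call
+// init_global_accept_fee_base(DEFAULT_ACCEPT_FEE_BASE) once at startup; a test
+// thread can override it via set_local_accept_fee_base(500_000), after which
+// get_accept_fee_base() resolves thread-local first, then global, then default.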
+pub fn get_accept_fee_base() -> u64 {
+    ACCEPT_FEE_BASE.with(|base| match base.get() {
+        None => {
+            let base = if GLOBAL_ACCEPT_FEE_BASE.is_init() {
+                GLOBAL_ACCEPT_FEE_BASE.borrow()
+            } else {
+                DEFAULT_ACCEPT_FEE_BASE
+            };
+            set_local_accept_fee_base(base);
+            base
+        }
+        Some(base) => base,
+    })
+}
+
 /// Return either a cuckoo context or a cuckatoo context
 /// Single change point
 /// MWC: We modify this to launch with cuckarood only on both floonet and mainnet
@@ -331,7 +398,7 @@ pub fn max_block_weight() -> u64 {
 /// Maximum allowed transaction weight (1 weight unit ~= 32 bytes)
 pub fn max_tx_weight() -> u64 {
-    let coinbase_weight = BLOCK_OUTPUT_WEIGHT + BLOCK_KERNEL_WEIGHT;
+    let coinbase_weight = OUTPUT_WEIGHT + KERNEL_WEIGHT;
     max_block_weight().saturating_sub(coinbase_weight) as u64
 }
 
@@ -415,13 +482,14 @@ pub fn get_network_name() -> String {
 /// vector and pads if needed (which will only be needed for the first few
 /// blocks after genesis)
-pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<HeaderInfo>
+pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<HeaderDifficultyInfo>
 where
-    T: IntoIterator<Item = HeaderInfo>,
+    T: IntoIterator<Item = HeaderDifficultyInfo>,
 {
     // Convert iterator to vector, so we can append to it if necessary
     let needed_block_count = DIFFICULTY_ADJUST_WINDOW as usize + 1;
-    let mut last_n: Vec<HeaderInfo> = cursor.into_iter().take(needed_block_count).collect();
+    let mut last_n: Vec<HeaderDifficultyInfo> =
+        cursor.into_iter().take(needed_block_count).collect();
 
     // Only needed just after blockchain launch... basically ensures there's
     // always enough data by simulating perfectly timed pre-genesis
@@ -439,13 +507,61 @@ where
         let mut last_ts = last_n.last().unwrap().timestamp;
         for _ in n..needed_block_count {
             last_ts = last_ts.saturating_sub(last_ts_delta);
-            last_n.push(HeaderInfo::from_ts_diff(last_ts, last_diff));
+            last_n.push(HeaderDifficultyInfo::from_ts_diff(last_ts, last_diff));
         }
     }
     last_n.reverse();
     last_n
 }
 
+/// Calculates the size of a header (in bytes) given a number of edge bits in the PoW
+#[inline]
+pub fn header_size_bytes(edge_bits: u8) -> usize {
+    let size = 2 + 2 * 8 + 5 * 32 + 32 + 2 * 8;
+    let proof_size = 8 + 4 + 8 + 1 + Proof::pack_len(edge_bits);
+    size + proof_size
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crate::core::Block;
+    use crate::genesis::*;
+    use crate::pow::mine_genesis_block;
+    use crate::ser::{BinWriter, Writeable};
+
+    fn test_header_len(genesis: Block) {
+        let mut raw = Vec::<u8>::with_capacity(1_024);
+        let mut writer = BinWriter::new(&mut raw, ProtocolVersion::local());
+        genesis.header.write(&mut writer).unwrap();
+        assert_eq!(raw.len(), header_size_bytes(genesis.header.pow.edge_bits()));
+    }
+
+    #[test]
+    fn automated_testing_header_len() {
+        set_local_chain_type(ChainTypes::AutomatedTesting);
+        test_header_len(mine_genesis_block().unwrap());
+    }
+
+    #[test]
+    fn user_testing_header_len() {
+        set_local_chain_type(ChainTypes::UserTesting);
+        test_header_len(mine_genesis_block().unwrap());
+    }
+
+    #[test]
+    fn floonet_header_len() {
+        set_local_chain_type(ChainTypes::Floonet);
+        test_header_len(genesis_floo());
+    }
+
+    #[test]
+    fn mainnet_header_len() {
+        set_local_chain_type(ChainTypes::Mainnet);
+        test_header_len(genesis_main());
+    }
+}
+
 /// Checking the running status of the server
 pub fn is_server_running() -> bool {
     SERVER_RUNNING.load(Ordering::SeqCst)
diff --git a/core/src/lib.rs b/core/src/lib.rs
index 4e4d43ce10..7704e183ed 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 The Grin Developers
+// Copyright 2021 The Grin Developers
 //
 // Licensed under the Apache License, Version
2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -27,12 +27,8 @@ extern crate enum_primitive; extern crate lazy_static; #[macro_use] extern crate serde_derive; -use serde; #[macro_use] extern crate log; -use failure; -#[macro_use] -extern crate failure_derive; #[macro_use] pub mod macros; diff --git a/core/src/libtx/aggsig.rs b/core/src/libtx/aggsig.rs index 8595b9fd0c..1a8c8a0cc1 100644 --- a/core/src/libtx/aggsig.rs +++ b/core/src/libtx/aggsig.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ //! This module interfaces into the underlying //! [Rust Aggsig library](https://github.com/mimblewimble/rust-secp256k1-zkp/blob/master/src/aggsig.rs) -use crate::libtx::error::{Error, ErrorKind}; +use crate::libtx::error::Error; use blake2::blake2b::Blake2b; use keychain::{BlindingFactor, Identifier, Keychain, SwitchCommitmentType}; use util::secp::key::{PublicKey, SecretKey}; @@ -75,7 +75,7 @@ pub fn create_secnonce(secp: &Secp256k1) -> Result { /// /// let secp = Secp256k1::with_caps(ContextFlag::SignOnly); /// let secret_nonce = aggsig::create_secnonce(&secp).unwrap(); -/// let secret_key = SecretKey::new(&mut thread_rng()); +/// let secret_key = SecretKey::new(&secp, &mut thread_rng()); /// let pub_nonce_sum = PublicKey::from_secret_key(&secp, &secret_nonce).unwrap(); /// // ... Add all other participating nonces /// let pub_key_sum = PublicKey::from_secret_key(&secp, &secret_key).unwrap(); @@ -145,7 +145,7 @@ pub fn calculate_partial_sig( /// /// let secp = Secp256k1::with_caps(ContextFlag::Full); /// let secret_nonce = aggsig::create_secnonce(&secp).unwrap(); -/// let secret_key = SecretKey::new(&mut thread_rng()); +/// let secret_key = SecretKey::new(&secp, &mut thread_rng()); /// let pub_nonce_sum = PublicKey::from_secret_key(&secp, &secret_nonce).unwrap(); /// // ... 
Add all other participating nonces /// let pub_key_sum = PublicKey::from_secret_key(&secp, &secret_key).unwrap(); @@ -193,7 +193,7 @@ pub fn verify_partial_sig( pubkey_sum, true, ) { - return Err(ErrorKind::Signature("Signature validation error".to_string()).into()); + return Err(Error::Signature("Signature validation error".to_string())); } Ok(()) } @@ -226,6 +226,7 @@ pub fn verify_partial_sig( /// use core::core::transaction::KernelFeatures; /// use core::core::{Output, OutputFeatures}; /// use keychain::{Keychain, ExtKeychain, SwitchCommitmentType}; +/// use std::convert::TryInto; /// use core::global; /// /// global::set_local_chain_type(global::ChainTypes::Floonet); @@ -242,10 +243,10 @@ pub fn verify_partial_sig( /// let height = 20; /// let over_commit = secp.commit_value(reward(fees, height)).unwrap(); /// let out_commit = output.commitment(); -/// let features = KernelFeatures::HeightLocked{fee: 0, lock_height: height}; +/// let features = KernelFeatures::HeightLocked{fee: 1.into(), lock_height: height}; /// let msg = features.kernel_sig_msg().unwrap(); -/// let excess = Secp256k1::commit_sum(vec![out_commit], vec![over_commit]).unwrap(); -/// let pubkey = excess.to_pubkey().unwrap(); +/// let excess = secp.commit_sum(vec![out_commit], vec![over_commit]).unwrap(); +/// let pubkey = excess.to_pubkey(&secp).unwrap(); /// let sig = aggsig::sign_from_key_id(&secp, &keychain, &msg, value, &key_id, None, Some(&pubkey)).unwrap(); /// ``` @@ -290,6 +291,7 @@ where /// use core::core::transaction::KernelFeatures; /// use core::core::{Output, OutputFeatures}; /// use keychain::{Keychain, ExtKeychain, SwitchCommitmentType}; +/// use std::convert::TryInto; /// use core::global; /// /// // Create signature @@ -307,10 +309,10 @@ where /// let height = 20; /// let over_commit = secp.commit_value(reward(fees, height)).unwrap(); /// let out_commit = output.commitment(); -/// let features = KernelFeatures::HeightLocked{fee: 0, lock_height: height}; +/// let features = KernelFeatures::HeightLocked{fee: 1.into(), lock_height: height}; /// let msg = features.kernel_sig_msg().unwrap(); -/// let excess = Secp256k1::commit_sum(vec![out_commit], vec![over_commit]).unwrap(); -/// let pubkey = excess.to_pubkey().unwrap(); +/// let excess = secp.commit_sum(vec![out_commit], vec![over_commit]).unwrap(); +/// let pubkey = excess.to_pubkey(&secp).unwrap(); /// let sig = aggsig::sign_from_key_id(&secp, &keychain, &msg, value, &key_id, None, Some(&pubkey)).unwrap(); /// /// // Verify the signature from the excess commit @@ -325,9 +327,9 @@ pub fn verify_single_from_commit( msg: &Message, commit: &Commitment, ) -> Result<(), Error> { - let pubkey = commit.to_pubkey()?; + let pubkey = commit.to_pubkey(secp)?; if !verify_single(secp, sig, msg, None, &pubkey, Some(&pubkey), false) { - return Err(ErrorKind::Signature("Signature validation error".to_string()).into()); + return Err(Error::Signature("Signature validation error".to_string())); } Ok(()) } @@ -360,7 +362,7 @@ pub fn verify_single_from_commit( /// /// let secp = Secp256k1::with_caps(ContextFlag::Full); /// let secret_nonce = aggsig::create_secnonce(&secp).unwrap(); -/// let secret_key = SecretKey::new(&mut thread_rng()); +/// let secret_key = SecretKey::new(&secp, &mut thread_rng()); /// let pub_nonce_sum = PublicKey::from_secret_key(&secp, &secret_nonce).unwrap(); /// // ... 
Add all other participating nonces /// let pub_key_sum = PublicKey::from_secret_key(&secp, &secret_key).unwrap(); @@ -395,7 +397,7 @@ pub fn verify_completed_sig( msg: &secp::Message, ) -> Result<(), Error> { if !verify_single(secp, sig, msg, None, pubkey, pubkey_sum, true) { - return Err(ErrorKind::Signature("Signature validation error".to_string()).into()); + return Err(Error::Signature("Signature validation error".to_string())); } Ok(()) } @@ -411,6 +413,16 @@ pub fn add_signatures( Ok(sig) } +/// Subtract a partial signature from a completed signature +/// see https://github.com/mimblewimble/rust-secp256k1-zkp/blob/e9e4f09bd0c85da914774a52219457ba10ac3e57/src/aggsig.rs#L267 +pub fn subtract_signature( + secp: &Secp256k1, + sig: &Signature, + partial_sig: &Signature, +) -> Result<(Signature, Option), Error> { + let sig = aggsig::subtract_partial_signature(secp, sig, partial_sig)?; + Ok(sig) +} /// Just a simple sig, creates its own nonce if not provided pub fn sign_single( secp: &Secp256k1, @@ -455,7 +467,7 @@ pub fn sign_with_blinding( blinding: &BlindingFactor, pubkey_sum: Option<&PublicKey>, ) -> Result { - let skey = &blinding.secret_key()?; + let skey = &blinding.secret_key(secp)?; let sig = aggsig::sign_single(secp, &msg, skey, None, None, None, pubkey_sum, None)?; Ok(sig) } @@ -481,9 +493,9 @@ pub fn sign_dual_key( hasher.update(pk1.0.as_ref()); hasher.update(pk2.0.as_ref()); - let mut sk = SecretKey::from_slice(hasher.finalize().as_bytes())?; - sk.mul_assign(&sk2)?; - sk.add_assign(&sk1)?; + let mut sk = SecretKey::from_slice(secp, hasher.finalize().as_bytes())?; + sk.mul_assign(secp, &sk2)?; + sk.add_assign(secp, &sk1)?; let pubkey = PublicKey::from_secret_key(&secp, &sk)?; let sig = sign_single(&secp, &msg, &sk, None, Some(&pubkey))?; @@ -503,11 +515,11 @@ pub fn build_composite_pubkey( let mut hasher = Blake2b::new(32); hasher.update(pk1.0.as_ref()); hasher.update(pk2.0.as_ref()); - let sk = SecretKey::from_slice(hasher.finalize().as_bytes())?; + let sk = SecretKey::from_slice(secp, hasher.finalize().as_bytes())?; let mut pk = pk2.clone(); - pk.mul_assign(&secp, &sk)?; - let pubkey = PublicKey::from_combination(vec![&pk1, &pk])?; + pk.mul_assign(secp, &sk)?; + let pubkey = PublicKey::from_combination(secp, vec![&pk1, &pk])?; Ok(pubkey) } @@ -538,8 +550,8 @@ mod test { thread_rng().fill(&mut msg_bytes); let msg = Message::from_slice(&msg_bytes).unwrap(); - let sk1 = SecretKey::new(&mut thread_rng()); - let sk2 = SecretKey::new(&mut thread_rng()); + let sk1 = SecretKey::new(&secp, &mut thread_rng()); + let sk2 = SecretKey::new(&secp, &mut thread_rng()); let batch_sig = sign_dual_key(&secp, &msg, &sk1, &sk2).unwrap(); let pk1 = PublicKey::from_secret_key(&secp, &sk1).unwrap(); diff --git a/core/src/libtx/build.rs b/core/src/libtx/build.rs index a827eb9083..eecbc75caf 100644 --- a/core/src/libtx/build.rs +++ b/core/src/libtx/build.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,7 +23,7 @@ //! //! Example: //! build::transaction( -//! KernelFeatures::Plain{ fee: 2 }, +//! KernelFeatures::Plain{ fee: 2.try_into().unwrap() }, //! vec![ //! input_rand(75), //! output_rand(42), @@ -213,10 +213,10 @@ where let msg = kernel.msg_to_sign()?; // Generate kernel public excess and associated signature. 
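+    // Note: the excess commits to an explicit value of 0 with blinding factor
+    // `skey`, so the kernel excess is a commitment to pure blinding; its
+    // public-key form is what signs the kernel message below.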
- let excess = BlindingFactor::rand(); - let skey = excess.secret_key()?; + let excess = BlindingFactor::rand(keychain.secp()); + let skey = excess.secret_key(keychain.secp())?; kernel.excess = keychain.secp().commit(0, skey)?; - let pubkey = &kernel.excess.to_pubkey()?; + let pubkey = &kernel.excess.to_pubkey(keychain.secp())?; kernel.excess_sig = aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey))?; kernel.verify()?; transaction_with_kernel(elems, kernel, excess, keychain, builder) @@ -246,27 +246,19 @@ where // Update tx with new kernel and offset. let mut tx = tx.replace_kernel(kernel); - tx.offset = blind_sum.split(&excess)?; + tx.offset = blind_sum.split(&excess, keychain.secp())?; Ok(tx) } // Just a simple test, most exhaustive tests in the core. #[cfg(test)] mod test { - use std::sync::Arc; - use util::RwLock; - use super::*; use crate::core::transaction::Weighting; - use crate::core::verifier_cache::{LruVerifierCache, VerifierCache}; use crate::global; use crate::libtx::ProofBuilder; use keychain::{ExtKeychain, ExtKeychainPath}; - fn verifier_cache() -> Arc> { - Arc::new(RwLock::new(LruVerifierCache::new())) - } - #[test] fn blind_simple_tx() { global::set_local_chain_type(global::ChainTypes::AutomatedTesting); @@ -276,17 +268,16 @@ mod test { let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier(); let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier(); - let vc = verifier_cache(); - let tx = transaction( - KernelFeatures::Plain { fee: 2 }, + KernelFeatures::Plain { fee: 2.into() }, &[input(10, key_id1), input(12, key_id2), output(20, key_id3)], &keychain, &builder, ) .unwrap(); - tx.validate(Weighting::AsTransaction, vc.clone()).unwrap(); + let height = 42; // arbitrary + tx.validate(Weighting::AsTransaction, height).unwrap(); } #[test] @@ -298,17 +289,16 @@ mod test { let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier(); let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier(); - let vc = verifier_cache(); - let tx = transaction( - KernelFeatures::Plain { fee: 2 }, + KernelFeatures::Plain { fee: 2.into() }, &[input(10, key_id1), input(12, key_id2), output(20, key_id3)], &keychain, &builder, ) .unwrap(); - tx.validate(Weighting::AsTransaction, vc.clone()).unwrap(); + let height = 42; // arbitrary + tx.validate(Weighting::AsTransaction, height).unwrap(); } #[test] @@ -319,16 +309,15 @@ mod test { let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier(); let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier(); - let vc = verifier_cache(); - let tx = transaction( - KernelFeatures::Plain { fee: 4 }, + KernelFeatures::Plain { fee: 4.into() }, &[input(6, key_id1), output(2, key_id2)], &keychain, &builder, ) .unwrap(); - tx.validate(Weighting::AsTransaction, vc.clone()).unwrap(); + let height = 42; // arbitrary + tx.validate(Weighting::AsTransaction, height).unwrap(); } } diff --git a/core/src/libtx/error.rs b/core/src/libtx/error.rs index 5b78ea951c..73623c30b7 100644 --- a/core/src/libtx/error.rs +++ b/core/src/libtx/error.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,97 +14,40 @@ //! 
libtx specific errors use crate::core::transaction; -use failure::{Backtrace, Context, Fail}; -use keychain; -use std::fmt::{self, Display}; use util::secp; /// Lib tx error definition -#[derive(Debug)] -pub struct Error { - inner: Context, -} - -#[derive(Clone, Debug, Eq, Fail, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, Eq, thiserror::Error, PartialEq, Serialize, Deserialize)] /// Libwallet error types -pub enum ErrorKind { +pub enum Error { /// SECP error - #[fail(display = "LibTx Secp Error, {}", _0)] - Secp(secp::Error), + #[error("LibTx Secp Error, {source:?}")] + Secp { + /// SECP error + #[from] + source: secp::Error, + }, /// Keychain error - #[fail(display = "LibTx Keychain Error, {}", _0)] - Keychain(keychain::Error), + #[error("LibTx Keychain Error, {source:?}")] + Keychain { + /// Keychain error + #[from] + source: keychain::Error, + }, /// Transaction error - #[fail(display = "LibTx Transaction Error, {}", _0)] - Transaction(transaction::Error), + #[error("LibTx Transaction Error, {source:?}")] + Transaction { + /// Transaction error + #[from] + source: transaction::Error, + }, /// Signature error - #[fail(display = "LibTx Signature Error, {}", _0)] + #[error("LibTx Signature Error, {0}")] Signature(String), /// Rangeproof error - #[fail(display = "LibTx Rangeproof Error, {}", _0)] + #[error("LibTx Rangeproof Error, {0}")] RangeProof(String), /// Other error - #[fail(display = "LibTx Other Error, {}", _0)] + #[error("LibTx Other Error, {0}")] Other(String), } - -impl Fail for Error { - fn cause(&self) -> Option<&dyn Fail> { - self.inner.cause() - } - - fn backtrace(&self) -> Option<&Backtrace> { - self.inner.backtrace() - } -} - -impl Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - Display::fmt(&self.inner, f) - } -} - -impl Error { - /// Return errorkind - pub fn kind(&self) -> ErrorKind { - self.inner.get_context().clone() - } -} - -impl From for Error { - fn from(kind: ErrorKind) -> Error { - Error { - inner: Context::new(kind), - } - } -} - -impl From> for Error { - fn from(inner: Context) -> Error { - Error { inner } - } -} - -impl From for Error { - fn from(error: secp::Error) -> Error { - Error { - inner: Context::new(ErrorKind::Secp(error)), - } - } -} - -impl From for Error { - fn from(error: keychain::Error) -> Error { - Error { - inner: Context::new(ErrorKind::Keychain(error)), - } - } -} - -impl From for Error { - fn from(error: transaction::Error) -> Error { - Error { - inner: Context::new(ErrorKind::Transaction(error)), - } - } -} diff --git a/core/src/libtx/mod.rs b/core/src/libtx/mod.rs index b2e2e41df8..9b5ea0defc 100644 --- a/core/src/libtx/mod.rs +++ b/core/src/libtx/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -28,26 +28,19 @@ pub mod proof; pub mod reward; pub mod secp_ser; -use crate::consensus; use crate::core::Transaction; +use crate::global::get_accept_fee_base; pub use self::proof::ProofBuilder; -pub use crate::libtx::error::{Error, ErrorKind}; +pub use crate::libtx::error::Error; -/// Fee base. Currently 0.001 MWC. 
The minimal tx fee will be scaled to this value -pub const DEFAULT_BASE_FEE: u64 = consensus::MILLI_GRIN; - -/// Transaction fee calculation -pub fn tx_fee( - input_len: usize, - output_len: usize, - kernel_len: usize, - base_fee: Option, -) -> u64 { - let use_base_fee = match base_fee { - Some(bf) => bf, - None => DEFAULT_BASE_FEE, - }; +/// Transaction fee calculation given numbers of inputs, outputs, and kernels +pub fn tx_fee(input_len: usize, output_len: usize, kernel_len: usize) -> u64 { + Transaction::weight_by_iok(input_len as u64, output_len as u64, kernel_len as u64) + * get_accept_fee_base() +} - Transaction::weight(input_len as u64, output_len as u64, kernel_len as u64) * use_base_fee +/// Transaction fee calculation given transaction +pub fn accept_fee(tx: Transaction, height: u64) -> u64 { + tx.accept_fee(height) } diff --git a/core/src/libtx/proof.rs b/core/src/libtx/proof.rs index ed22bf05c7..f6729dd140 100644 --- a/core/src/libtx/proof.rs +++ b/core/src/libtx/proof.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ //! Rangeproof library functions -use crate::libtx::error::{Error, ErrorKind}; +use crate::libtx::error::Error; use blake2::blake2b::blake2b; use keychain::extkey_bip32::BIP32GrinHasher; use keychain::{Identifier, Keychain, SwitchCommitmentType, ViewKey}; @@ -79,9 +79,9 @@ pub fn rewind( where B: ProofBuild, { - let nonce = b.rewind_nonce(secp, &commit).map_err(|e| { - ErrorKind::RangeProof(format!("Unable rewind for commit {:?}, {}", commit, e)) - })?; + let nonce = b + .rewind_nonce(secp, &commit) + .map_err(|e| Error::RangeProof(format!("Unable rewind for commit {:?}, {}", commit, e)))?; let info = secp.rewind_bullet_proof(commit, nonce, extra_data, proof); if info.is_err() { return Ok(None); @@ -92,7 +92,7 @@ where let check = b .check_output(secp, &commit, amount, info.message) .map_err(|e| { - ErrorKind::RangeProof(format!("Unable to check output for {:?}, {}", commit, e)) + Error::RangeProof(format!("Unable to check output for {:?}, {}", commit, e)) })?; Ok(check.map(|(id, switch)| (amount, id, switch))) @@ -146,7 +146,9 @@ where let private_hash = blake2b(32, &[], &private_root_key.0).as_bytes().to_vec(); - let public_root_key = keychain.public_root_key().serialize_vec(true); + let public_root_key = keychain + .public_root_key() + .serialize_vec(keychain.secp(), true); let rewind_hash = blake2b(32, &[], &public_root_key[..]).as_bytes().to_vec(); Self { @@ -163,12 +165,11 @@ where &self.rewind_hash }; let res = blake2b(32, &commit.0, hash); - SecretKey::from_slice(res.as_bytes()).map_err(|e| { - ErrorKind::RangeProof(format!( + SecretKey::from_slice(self.keychain.secp(), res.as_bytes()).map_err(|e| { + Error::RangeProof(format!( "Unable to extract nonce from commit {:?}, {}", commit, e )) - .into() }) } } @@ -281,12 +282,11 @@ where fn nonce(&self, commit: &Commitment) -> Result { let res = blake2b(32, &commit.0, &self.root_hash); - SecretKey::from_slice(res.as_bytes()).map_err(|e| { - ErrorKind::RangeProof(format!( + SecretKey::from_slice(self.keychain.secp(), res.as_bytes()).map_err(|e| { + Error::RangeProof(format!( "Unable to extract nonce from commit {:?}, {}", commit, e )) - .into() }) } } @@ -367,14 +367,13 @@ where } impl ProofBuild for ViewKey { - fn rewind_nonce(&self, _secp: &Secp256k1, commit: &Commitment) -> Result { + fn rewind_nonce(&self, 
secp: &Secp256k1, commit: &Commitment) -> Result { let res = blake2b(32, &commit.0, &self.rewind_hash); - SecretKey::from_slice(res.as_bytes()).map_err(|e| { - ErrorKind::RangeProof(format!( + SecretKey::from_slice(secp, res.as_bytes()).map_err(|e| { + Error::RangeProof(format!( "Unable to rewind nonce for commit {:?}, {}", commit, e )) - .into() }) } @@ -433,10 +432,10 @@ impl ProofBuild for ViewKey { if child_number.is_hardened() { return Ok(None); } - key = key.ckd_pub(&secp, &mut hasher, child_number)?; + key = key.ckd_pub(secp, &mut hasher, child_number)?; } let pub_key = key.commit(secp, amount, switch)?; - if commit.to_pubkey()? == pub_key { + if commit.to_pubkey(secp)? == pub_key { Ok(Some((id, switch))) } else { Ok(None) diff --git a/core/src/libtx/reward.rs b/core/src/libtx/reward.rs index 1be313749b..e3f314884c 100644 --- a/core/src/libtx/reward.rs +++ b/core/src/libtx/reward.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -52,14 +52,14 @@ where let secp = secp.lock(); let over_commit = secp.commit_value(reward(fees, height))?; let out_commit = output.commitment(); - let excess = secp::Secp256k1::commit_sum(vec![out_commit], vec![over_commit])?; - let pubkey = excess.to_pubkey()?; + let excess = secp.commit_sum(vec![out_commit], vec![over_commit])?; + let pubkey = excess.to_pubkey(&secp)?; let features = KernelFeatures::Coinbase; let msg = features.kernel_sig_msg()?; let sig = match test_mode { true => { - let test_nonce = secp::key::SecretKey::from_slice(&[1; 32])?; + let test_nonce = secp::key::SecretKey::from_slice(&secp, &[1; 32])?; aggsig::sign_from_key_id( &secp, keychain, diff --git a/core/src/libtx/secp_ser.rs b/core/src/libtx/secp_ser.rs index 74755a0f50..351c40a421 100644 --- a/core/src/libtx/secp_ser.rs +++ b/core/src/libtx/secp_ser.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
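The ProofBuild::rewind_nonce/nonce implementations changed above all derive a per-output nonce the same way: blake2b keyed with the output commitment, hashing a wallet-level rewind or root hash, with the digest reinterpreted as a secret key. A minimal sketch of just that derivation, using the same blake2 call the diff uses; the conversion into a SecretKey (and its error path) is elided, and the byte lengths are the usual Pedersen/secp sizes:

```rust
use blake2::blake2b::blake2b;

// Sketch: 32 nonce bytes from (commitment, rewind_hash), mirroring
// `rewind_nonce` in proof.rs. The commitment bytes act as the blake2b key;
// the wallet's rewind hash is the hashed message.
fn nonce_bytes(commit: &[u8; 33], rewind_hash: &[u8; 32]) -> [u8; 32] {
    let digest = blake2b(32, commit, rewind_hash);
    let mut out = [0u8; 32];
    out.copy_from_slice(digest.as_bytes());
    out
}
```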
@@ -23,14 +23,16 @@ use util::{from_hex, ToHex}; pub mod pubkey_serde { use serde::{Deserialize, Deserializer, Serializer}; use util::secp::key::PublicKey; - use util::{from_hex, ToHex}; + use util::{from_hex, static_secp_instance, ToHex}; /// pub fn serialize(key: &PublicKey, serializer: S) -> Result where S: Serializer, { - serializer.serialize_str(&key.serialize_vec(true).to_hex()) + let static_secp = static_secp_instance(); + let static_secp = static_secp.lock(); + serializer.serialize_str(&key.serialize_vec(&static_secp, true).to_hex()) } /// @@ -39,6 +41,8 @@ pub mod pubkey_serde { D: Deserializer<'de>, { use serde::de::Error; + let static_secp = static_secp_instance(); + let static_secp = static_secp.lock(); String::deserialize(deserializer) .and_then(|string| { from_hex(&string).map_err(|err| { @@ -46,7 +50,7 @@ pub mod pubkey_serde { }) }) .and_then(|bytes: Vec| { - PublicKey::from_slice(&bytes).map_err(|err| { + PublicKey::from_slice(&static_secp, &bytes).map_err(|err| { Error::custom(format!("Unable to build Pub Key from {:?}, {}", bytes, err)) }) }) @@ -55,17 +59,21 @@ pub mod pubkey_serde { /// Serializes an Option to and from hex pub mod option_sig_serde { - use crate::serde::{Deserialize, Deserializer, Serializer}; use serde::de::Error; - use util::{from_hex, secp, ToHex}; + use serde::{Deserialize, Deserializer, Serializer}; + use util::{from_hex, secp, static_secp_instance, ToHex}; /// pub fn serialize(sig: &Option, serializer: S) -> Result where S: Serializer, { + let static_secp = static_secp_instance(); + let static_secp = static_secp.lock(); match sig { - Some(sig) => serializer.serialize_str(&(&sig.serialize_compact()[..]).to_hex()), + Some(sig) => { + serializer.serialize_str(&(&sig.serialize_compact(&static_secp)[..]).to_hex()) + } None => serializer.serialize_none(), } } @@ -75,15 +83,20 @@ pub mod option_sig_serde { where D: Deserializer<'de>, { + let static_secp = static_secp_instance(); + let static_secp = static_secp.lock(); Option::::deserialize(deserializer).and_then(|res| match res { Some(string) => from_hex(&string) .map_err(|err| { Error::custom(format!("Fail to parse signature HEX {}, {}", string, err)) }) .and_then(|bytes: Vec| { + if bytes.len() < 64 { + return Err(Error::invalid_length(bytes.len(), &"64 bytes")); + } let mut b = [0u8; 64]; b.copy_from_slice(&bytes[0..64]); - secp::Signature::from_compact(&b) + secp::Signature::from_compact(&static_secp, &b) .map(Some) .map_err(|err| Error::custom(format!("Fail to decode signature, {}", err))) }), @@ -94,9 +107,9 @@ pub mod option_sig_serde { /// Serializes an Option to and from hex pub mod option_seckey_serde { - use crate::serde::{Deserialize, Deserializer, Serializer}; use serde::de::Error; - use util::{from_hex, secp, ToHex}; + use serde::{Deserialize, Deserializer, Serializer}; + use util::{from_hex, secp, static_secp_instance, ToHex}; /// pub fn serialize( @@ -117,15 +130,20 @@ pub mod option_seckey_serde { where D: Deserializer<'de>, { + let static_secp = static_secp_instance(); + let static_secp = static_secp.lock(); Option::::deserialize(deserializer).and_then(|res| match res { Some(string) => from_hex(&string) .map_err(|err| { Error::custom(format!("Fail to parse key from HEX {}, {}", string, err)) }) .and_then(|bytes: Vec| { + if bytes.len() < 32 { + return Err(Error::invalid_length(bytes.len(), &"32 bytes")); + } let mut b = [0u8; 32]; b.copy_from_slice(&bytes[0..32]); - secp::key::SecretKey::from_slice(&b) + secp::key::SecretKey::from_slice(&static_secp, &b) .map(Some) .map_err(|err| 
Error::custom(format!("Fail to decode key, {}", err))) }), @@ -136,16 +154,18 @@ pub mod option_seckey_serde { /// Serializes a secp::Signature to and from hex pub mod sig_serde { - use crate::serde::{Deserialize, Deserializer, Serializer}; use serde::de::Error; - use util::{from_hex, secp, ToHex}; + use serde::{Deserialize, Deserializer, Serializer}; + use util::{from_hex, secp, static_secp_instance, ToHex}; /// pub fn serialize(sig: &secp::Signature, serializer: S) -> Result where S: Serializer, { - serializer.serialize_str(&(&sig.serialize_compact()[..]).to_hex()) + let static_secp = static_secp_instance(); + let static_secp = static_secp.lock(); + serializer.serialize_str(&(&sig.serialize_compact(&static_secp)[..]).to_hex()) } /// @@ -153,6 +173,8 @@ pub mod sig_serde { where D: Deserializer<'de>, { + let static_secp = static_secp_instance(); + let static_secp = static_secp.lock(); String::deserialize(deserializer) .and_then(|string| { from_hex(&string).map_err(|err| { @@ -160,9 +182,12 @@ pub mod sig_serde { }) }) .and_then(|bytes: Vec| { + if bytes.len() < 64 { + return Err(Error::invalid_length(bytes.len(), &"64 bytes")); + } let mut b = [0u8; 64]; b.copy_from_slice(&bytes[0..64]); - secp::Signature::from_compact(&b) + secp::Signature::from_compact(&static_secp, &b) .map_err(|err| Error::custom(format!("Fail to decode signature, {}", err))) }) } @@ -170,8 +195,8 @@ pub mod sig_serde { /// Serializes an Option to and from hex pub mod option_commitment_serde { - use crate::serde::{Deserialize, Deserializer, Serializer}; use serde::de::Error; + use serde::{Deserialize, Deserializer, Serializer}; use util::secp::pedersen::Commitment; use util::{from_hex, ToHex}; @@ -392,7 +417,7 @@ mod test { pub fn random() -> SerTest { let static_secp = static_secp_instance(); let secp = static_secp.lock(); - let sk = SecretKey::new(&mut thread_rng()); + let sk = SecretKey::new(&secp, &mut thread_rng()); let mut msg = [0u8; 32]; thread_rng().fill(&mut msg); let msg = Message::from_slice(&msg).unwrap(); diff --git a/core/src/macros.rs b/core/src/macros.rs index 28e6e50231..d2fa9faaaf 100644 --- a/core/src/macros.rs +++ b/core/src/macros.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/core/src/pow.rs b/core/src/pow.rs index 1446214d34..7aaa04a1de 100644 --- a/core/src/pow.rs +++ b/core/src/pow.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
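All of the secp_ser changes above follow one pattern: hex-encode through the shared static secp instance when serializing, and length-check the decoded bytes before calling from_compact/from_slice when deserializing. For reference, this is how a struct would wire those helpers in; the struct and field names are illustrative, not from this codebase:

```rust
use crate::libtx::secp_ser;
use serde::{Deserialize, Serialize};
use util::secp;

/// Hypothetical payload showing the `#[serde(with = ...)]` hookup.
#[derive(Serialize, Deserialize)]
struct SignedPayload {
    /// 64-byte compact signature <-> hex string
    #[serde(with = "secp_ser::sig_serde")]
    sig: secp::Signature,
    /// hex string, or null when absent
    #[serde(with = "secp_ser::option_sig_serde")]
    partial: Option<secp::Signature>,
}
```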
@@ -33,8 +33,6 @@ pub use self::types::*; use crate::core::{Block, BlockHeader}; use crate::genesis; use crate::global; -use chrono; -use num; #[macro_use] mod common; @@ -54,8 +52,8 @@ pub use crate::pow::cuckarood::{new_cuckarood_ctx, CuckaroodContext}; pub use crate::pow::cuckaroom::{new_cuckaroom_ctx, CuckaroomContext}; pub use crate::pow::cuckarooz::{new_cuckarooz_ctx, CuckaroozContext}; pub use crate::pow::cuckatoo::{new_cuckatoo_ctx, CuckatooContext}; -pub use crate::pow::error::{Error, ErrorKind}; -use chrono::prelude::{DateTime, NaiveDateTime, Utc}; +pub use crate::pow::error::Error; +use chrono::prelude::DateTime; const MAX_SOLS: u32 = 10; @@ -70,7 +68,7 @@ pub fn verify_size(bh: &BlockHeader) -> Result<(), Error> { )?; ctx.set_header_nonce( bh.pre_pow() - .map_err(|e| ErrorKind::PrePowError(format!("{}", e)))?, + .map_err(|e| Error::PrePowError(format!("{}", e)))?, None, false, )?; @@ -109,7 +107,7 @@ pub fn pow_size( let mut ctx = global::create_pow_context::(bh.height, sz, proof_size, MAX_SOLS)?; ctx.set_header_nonce( bh.pre_pow() - .map_err(|e| ErrorKind::PrePowError(format!("{}", e)))?, + .map_err(|e| Error::PrePowError(format!("{}", e)))?, None, true, )?; @@ -127,7 +125,7 @@ pub fn pow_size( // and if we're back where we started, update the time (changes the hash as // well) if bh.pow.nonce == start_nonce { - bh.timestamp = DateTime::::from_utc(NaiveDateTime::from_timestamp(0, 0), Utc); + bh.timestamp = DateTime::from_timestamp(0, 0).unwrap().to_utc(); } } } diff --git a/core/src/pow/common.rs b/core/src/pow/common.rs index ddb24eb862..2fff6e2fb9 100644 --- a/core/src/pow/common.rs +++ b/core/src/pow/common.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,10 +15,10 @@ //! Common types and traits for cuckoo family of solvers use crate::pow::error::Error; -use crate::pow::num::{PrimInt, ToPrimitive}; use crate::pow::siphash::siphash24; use blake2::blake2b::blake2b; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; +use num::{PrimInt, ToPrimitive}; use std::fmt; use std::hash::Hash; use std::io::Cursor; @@ -82,7 +82,7 @@ pub fn create_siphash_keys(header: &[u8]) -> Result<[u64; 4], Error> { /// Utility struct to calculate commonly used Cuckoo parameters calculated /// from header, nonce, edge_bits, etc. pub struct CuckooParams { - pub edge_bits: u8, + //pub edge_bits: u8, pub proof_size: usize, pub num_edges: u64, pub siphash_keys: [u64; 4], @@ -98,7 +98,7 @@ impl CuckooParams { let num_nodes = 1u64 << node_bits; let node_mask = num_nodes - 1; Ok(CuckooParams { - edge_bits, + //edge_bits, proof_size, num_edges, siphash_keys: [0; 4], diff --git a/core/src/pow/cuckaroo.rs b/core/src/pow/cuckaroo.rs index 61447f861f..8af209565f 100644 --- a/core/src/pow/cuckaroo.rs +++ b/core/src/pow/cuckaroo.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
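A note on the verify() rewrites in the cuckaroo/cuckarood/cuckaroom/cuckarooz/cuckatoo hunks below: they all replace the quadratic scan for a matching edge endpoint with the same trick, bucketing endpoints by their low bits into head/prev arrays that form singly linked lists, so each lookup walks only one bucket. A standalone sketch of that indexing scheme (names are mine, not the diff's):

```rust
/// Bucket `values` by their low bits: `head[b]` holds the most recent index
/// in bucket b, and `prev[i]` chains back to earlier indices in the same
/// bucket. `values.len()` doubles as the "nil" sentinel, as in the diff.
fn build_buckets(values: &[u64]) -> (Vec<usize>, Vec<usize>, u64) {
    let n = values.len();
    assert!(n > 0);
    // round n up to a power of two, minus one, to get the bucket mask
    let mask = u64::MAX >> (n as u64).leading_zeros();
    let mut head = vec![n; 1 + mask as usize];
    let mut prev = vec![0usize; n];
    for (i, &v) in values.iter().enumerate() {
        let b = (v & mask) as usize;
        prev[i] = head[b];
        head[b] = i;
    }
    (head, prev, mask)
}

/// Walk only the bucket of `values[i]` looking for another matching endpoint.
fn find_match(values: &[u64], head: &[usize], prev: &[usize], mask: u64, i: usize) -> Option<usize> {
    let mut k = head[(values[i] & mask) as usize];
    while k != values.len() {
        if k != i && values[k] == values[i] {
            return Some(k);
        }
        k = prev[k];
    }
    None
}
```

Expected lookup cost becomes roughly constant per endpoint for a 42-cycle, at the price of two small index vectors; the diff applies the same pattern with 2 * size entries and size (or 2 * size) as the nil sentinel.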
@@ -25,7 +25,7 @@ use crate::global; use crate::pow::common::CuckooParams; -use crate::pow::error::{Error, ErrorKind}; +use crate::pow::error::Error; use crate::pow::siphash::siphash_block; use crate::pow::{PoWContext, Proof}; @@ -57,30 +57,58 @@ impl PoWContext for CuckarooContext { } fn verify(&self, proof: &Proof) -> Result<(), Error> { - if proof.proof_size() != global::proofsize() { - return Err(ErrorKind::Verification("wrong cycle length".to_owned()).into()); + let size = proof.proof_size(); + if size != global::proofsize() { + return Err(Error::Verification("wrong cycle length".to_owned())); } let nonces = &proof.nonces; - let mut uvs = vec![0u64; 2 * proof.proof_size()]; + let mut uvs = vec![0u64; 2 * size]; let mut xor0: u64 = 0; let mut xor1: u64 = 0; + let mask = u64::MAX >> (size as u64).leading_zeros(); // round size up to 2-power - 1 + // the next three arrays form a linked list of nodes with matching bits 6..1 + let mut headu = vec![2 * size; 1 + mask as usize]; + let mut headv = vec![2 * size; 1 + mask as usize]; + let mut prev = vec![0usize; 2 * size]; - for n in 0..proof.proof_size() { + for n in 0..size { if nonces[n] > self.params.edge_mask { - return Err(ErrorKind::Verification("edge too big".to_owned()).into()); + return Err(Error::Verification("edge too big".to_owned())); } if n > 0 && nonces[n] <= nonces[n - 1] { - return Err(ErrorKind::Verification("edges not ascending".to_owned()).into()); + return Err(Error::Verification("edges not ascending".to_owned())); } // 21 is standard siphash rotation constant let edge: u64 = siphash_block(&self.params.siphash_keys, nonces[n], 21, false); - uvs[2 * n] = edge & self.params.node_mask; - xor0 ^= uvs[2 * n]; - uvs[2 * n + 1] = (edge >> 32) & self.params.node_mask; - xor1 ^= uvs[2 * n + 1]; + let u = edge & self.params.node_mask; + let v = (edge >> 32) & self.params.node_mask; + + uvs[2 * n] = u; + let ubits = (u & mask) as usize; + prev[2 * n] = headu[ubits]; + headu[ubits] = 2 * n; + + uvs[2 * n + 1] = v; + let vbits = (v & mask) as usize; + prev[2 * n + 1] = headv[vbits]; + headv[vbits] = 2 * n + 1; + + xor0 ^= u; + xor1 ^= v; } if xor0 | xor1 != 0 { - return Err(ErrorKind::Verification("endpoints don't match up".to_owned()).into()); + return Err(Error::Verification("endpoints don't match up".to_owned())); + } + // make prev lists circular + for n in 0..size { + if prev[2 * n] == 2 * size { + let ubits = (uvs[2 * n] & mask) as usize; + prev[2 * n] = headu[ubits]; + } + if prev[2 * n + 1] == 2 * size { + let vbits = (uvs[2 * n + 1] & mask) as usize; + prev[2 * n + 1] = headv[vbits]; + } } let mut n = 0; let mut i = 0; @@ -90,20 +118,20 @@ impl PoWContext for CuckarooContext { j = i; let mut k = j; loop { - k = (k + 2) % (2 * self.params.proof_size); + k = prev[k]; if k == i { break; } if uvs[k] == uvs[i] { // find other edge endpoint matching one at i if j != i { - return Err(ErrorKind::Verification("branch in cycle".to_owned()).into()); + return Err(Error::Verification("branch in cycle".to_owned())); } j = k; } } if j == i { - return Err(ErrorKind::Verification("cycle dead ends".to_owned()).into()); + return Err(Error::Verification("cycle dead ends".to_owned())); } i = j ^ 1; n += 1; @@ -111,10 +139,10 @@ impl PoWContext for CuckarooContext { break; } } - if n == self.params.proof_size { + if n == size { Ok(()) } else { - Err(ErrorKind::Verification("cycle too short".to_owned()).into()) + Err(Error::Verification("cycle too short".to_owned())) } } } @@ -159,7 +187,9 @@ mod test { let mut ctx = new_impl(19, 42); 
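+        // Pin the siphash keys to the known fixtures: each solution must verify
+        // under its own keys and (per the new assertions) fail under the other set.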
ctx.params.siphash_keys = V1_19_HASH; assert!(ctx.verify(&Proof::new(V1_19_SOL.to_vec())).is_ok()); + assert!(ctx.verify(&Proof::new(V2_19_SOL.to_vec())).is_err()); ctx.params.siphash_keys = V2_19_HASH.clone(); + assert!(ctx.verify(&Proof::new(V1_19_SOL.to_vec())).is_err()); assert!(ctx.verify(&Proof::new(V2_19_SOL.to_vec())).is_ok()); assert!(ctx.verify(&Proof::zero(42)).is_err()); } diff --git a/core/src/pow/cuckarood.rs b/core/src/pow/cuckarood.rs index 61fd7e5e28..ed4c73f1d1 100644 --- a/core/src/pow/cuckarood.rs +++ b/core/src/pow/cuckarood.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,7 +24,7 @@ use crate::global; use crate::pow::common::CuckooParams; -use crate::pow::error::{Error, ErrorKind}; +use crate::pow::error::Error; use crate::pow::siphash::siphash_block; use crate::pow::{PoWContext, Proof}; @@ -56,37 +56,54 @@ impl PoWContext for CuckaroodContext { } fn verify(&self, proof: &Proof) -> Result<(), Error> { - if proof.proof_size() != global::proofsize() { - return Err(ErrorKind::Verification("wrong cycle length".to_owned()).into()); + let size = proof.proof_size(); + if size != global::proofsize() { + return Err(Error::Verification("wrong cycle length".to_owned())); } let nonces = &proof.nonces; - let mut uvs = vec![0u64; 2 * proof.proof_size()]; + let mut uvs = vec![0u64; 2 * size]; let mut ndir = vec![0usize; 2]; let mut xor0: u64 = 0; let mut xor1: u64 = 0; + let mask = u64::MAX >> (size as u64).leading_zeros(); // round size up to 2-power - 1 + // the next two arrays form a linked list of nodes with matching bits 4..0|dir + let mut headu = vec![2 * size; 1 + mask as usize]; + let mut headv = vec![2 * size; 1 + mask as usize]; + let mut prev = vec![0usize; 2 * size]; - for n in 0..proof.proof_size() { + for n in 0..size { let dir = (nonces[n] & 1) as usize; - if ndir[dir] >= proof.proof_size() / 2 { - return Err(ErrorKind::Verification("edges not balanced".to_owned()).into()); + if ndir[dir] >= size / 2 { + return Err(Error::Verification("edges not balanced".to_owned())); } if nonces[n] > self.params.edge_mask { - return Err(ErrorKind::Verification("edge too big".to_owned()).into()); + return Err(Error::Verification("edge too big".to_owned())); } if n > 0 && nonces[n] <= nonces[n - 1] { - return Err(ErrorKind::Verification("edges not ascending".to_owned()).into()); + return Err(Error::Verification("edges not ascending".to_owned())); } // cuckarood uses a non-standard siphash rotation constant 25 as anti-ASIC tweak let edge: u64 = siphash_block(&self.params.siphash_keys, nonces[n], 25, false); let idx = 4 * ndir[dir] + 2 * dir; - uvs[idx] = edge & self.params.node_mask; - xor0 ^= uvs[idx]; - uvs[idx + 1] = (edge >> 32) & self.params.node_mask; - xor1 ^= uvs[idx + 1]; + let u = edge & self.params.node_mask; + let v = (edge >> 32) & self.params.node_mask; + + uvs[idx] = u; + let ubits = ((u << 1 | dir as u64) & mask) as usize; + prev[idx] = headu[ubits]; + headu[ubits] = idx; + + uvs[idx + 1] = v; + let vbits = ((v << 1 | dir as u64) & mask) as usize; + prev[idx + 1] = headv[vbits]; + headv[vbits] = idx + 1; + + xor0 ^= u; + xor1 ^= v; ndir[dir] += 1; } if xor0 | xor1 != 0 { - return Err(ErrorKind::Verification("endpoints don't match up".to_owned()).into()); + return Err(Error::Verification("endpoints don't match up".to_owned())); } let mut n = 0; let mut i = 0; @@ -94,17 +111,23 @@ 
impl PoWContext for CuckaroodContext { loop { // follow cycle j = i; - for k in (((i % 4) ^ 2)..(2 * self.params.proof_size)).step_by(4) { + let mut k = if i & 1 == 0 { + headu[((uvs[i] << 1 | 1) & mask) as usize] + } else { + headv[((uvs[i] << 1 | 0) & mask) as usize] + }; + while k != 2 * size { if uvs[k] == uvs[i] { // find reverse edge endpoint identical to one at i if j != i { - return Err(ErrorKind::Verification("branch in cycle".to_owned()).into()); + return Err(Error::Verification("branch in cycle".to_owned())); } j = k; } + k = prev[k]; } if j == i { - return Err(ErrorKind::Verification("cycle dead ends".to_owned()).into()); + return Err(Error::Verification("cycle dead ends".to_owned())); } i = j ^ 1; n += 1; @@ -112,10 +135,10 @@ impl PoWContext for CuckaroodContext { break; } } - if n == self.params.proof_size { + if n == size { Ok(()) } else { - Err(ErrorKind::Verification("cycle too short".to_owned()).into()) + Err(Error::Verification("cycle too short".to_owned())) } } } diff --git a/core/src/pow/cuckaroom.rs b/core/src/pow/cuckaroom.rs index dd0aa8ae58..82fc031d77 100644 --- a/core/src/pow/cuckaroom.rs +++ b/core/src/pow/cuckaroom.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,7 +23,7 @@ use crate::global; use crate::pow::common::CuckooParams; -use crate::pow::error::{Error, ErrorKind}; +use crate::pow::error::Error; use crate::pow::siphash::siphash_block; use crate::pow::{PoWContext, Proof}; @@ -55,60 +55,72 @@ impl PoWContext for CuckaroomContext { } fn verify(&self, proof: &Proof) -> Result<(), Error> { - let proofsize = proof.proof_size(); - if proofsize != global::proofsize() { - return Err(ErrorKind::Verification("wrong cycle length".to_owned()).into()); + let size = proof.proof_size(); + if size != global::proofsize() { + return Err(Error::Verification("wrong cycle length".to_owned())); } let nonces = &proof.nonces; - let mut from = vec![0u64; proofsize]; - let mut to = vec![0u64; proofsize]; + let mut from = vec![0u64; size]; + let mut to = vec![0u64; size]; let mut xor_from: u64 = 0; let mut xor_to: u64 = 0; + let mask = u64::MAX >> (size as u64).leading_zeros(); // round size up to 2-power - 1 + // the next two arrays form a linked list of nodes with matching bits 6..1 + let mut head = vec![size; 1 + mask as usize]; + let mut prev = vec![0usize; size]; - for n in 0..proofsize { + for n in 0..size { if nonces[n] > self.params.edge_mask { - return Err(ErrorKind::Verification("edge too big".to_owned()).into()); + return Err(Error::Verification("edge too big".to_owned())); } if n > 0 && nonces[n] <= nonces[n - 1] { - return Err(ErrorKind::Verification("edges not ascending".to_owned()).into()); + return Err(Error::Verification("edges not ascending".to_owned())); } // 21 is standard siphash rotation constant let edge: u64 = siphash_block(&self.params.siphash_keys, nonces[n], 21, true); - from[n] = edge & self.params.node_mask; + let u = edge & self.params.node_mask; + let v = (edge >> 32) & self.params.node_mask; + from[n] = u; + let bits = (u & mask) as usize; + prev[n] = head[bits]; + head[bits] = n; + to[n] = v; xor_from ^= from[n]; - to[n] = (edge >> 32) & self.params.node_mask; xor_to ^= to[n]; } if xor_from != xor_to { - return Err(ErrorKind::Verification("endpoints don't match up".to_owned()).into()); + return Err(Error::Verification("endpoints don't match up".to_owned())); 
} - let mut visited = vec![false; proofsize]; + let mut visited = vec![false; size]; let mut n = 0; let mut i = 0; loop { // follow cycle if visited[i] { - return Err(ErrorKind::Verification("branch in cycle".to_owned()).into()); + return Err(Error::Verification("branch in cycle".to_owned())); } visited[i] = true; - let mut nexti = 0; - while from[nexti] != to[i] { - nexti += 1; - if nexti == proofsize { - return Err(ErrorKind::Verification("cycle dead ends".to_owned()).into()); + let mut k = head[(to[i] & mask) as usize]; + loop { + if k == size { + return Err(Error::Verification("cycle dead ends".to_owned())); } + if from[k] == to[i] { + break; + } + k = prev[k]; } - i = nexti; + i = k; n += 1; if i == 0 { // must cycle back to start or find branch break; } } - if n == proofsize { + if n == size { Ok(()) } else { - Err(ErrorKind::Verification("cycle too short".to_owned()).into()) + Err(Error::Verification("cycle too short".to_owned())) } } } diff --git a/core/src/pow/cuckarooz.rs b/core/src/pow/cuckarooz.rs index 730140b347..5705a530ea 100644 --- a/core/src/pow/cuckarooz.rs +++ b/core/src/pow/cuckarooz.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,7 +24,7 @@ use crate::global; use crate::pow::common::CuckooParams; -use crate::pow::error::{Error, ErrorKind}; +use crate::pow::error::Error; use crate::pow::siphash::siphash_block; use crate::pow::{PoWContext, Proof}; @@ -56,28 +56,51 @@ impl PoWContext for CuckaroozContext { } fn verify(&self, proof: &Proof) -> Result<(), Error> { - if proof.proof_size() != global::proofsize() { - return Err(ErrorKind::Verification("wrong cycle length".to_owned()).into()); + let size = proof.proof_size(); + if size != global::proofsize() { + return Err(Error::Verification("wrong cycle length".to_owned())); } let nonces = &proof.nonces; - let mut uvs = vec![0u64; 2 * proof.proof_size()]; + let mut uvs = vec![0u64; 2 * size]; let mut xoruv: u64 = 0; + let mask = u64::MAX >> (size as u64).leading_zeros(); // round size up to 2-power - 1 + // the next two arrays form a linked list of nodes with matching bits 6..1 + let mut head = vec![2 * size; 1 + mask as usize]; + let mut prev = vec![0usize; 2 * size]; - for n in 0..proof.proof_size() { + for n in 0..size { if nonces[n] > self.params.edge_mask { - return Err(ErrorKind::Verification("edge too big".to_owned()).into()); + return Err(Error::Verification("edge too big".to_owned())); } if n > 0 && nonces[n] <= nonces[n - 1] { - return Err(ErrorKind::Verification("edges not ascending".to_owned()).into()); + return Err(Error::Verification("edges not ascending".to_owned())); } // 21 is standard siphash rotation constant let edge: u64 = siphash_block(&self.params.siphash_keys, nonces[n], 21, true); - uvs[2 * n] = edge & self.params.node_mask; - uvs[2 * n + 1] = (edge >> 32) & self.params.node_mask; + let u = edge & self.params.node_mask; + let v = (edge >> 32) & self.params.node_mask; + + uvs[2 * n] = u; + let bits = (u & mask) as usize; + prev[2 * n] = head[bits]; + head[bits] = 2 * n; + + uvs[2 * n + 1] = v; + let bits = (v & mask) as usize; + prev[2 * n + 1] = head[bits]; + head[bits] = 2 * n + 1; + xoruv ^= uvs[2 * n] ^ uvs[2 * n + 1]; } if xoruv != 0 { - return Err(ErrorKind::Verification("endpoints don't match up".to_owned()).into()); + return Err(Error::Verification("endpoints don't match up".to_owned())); + } + // make 
prev lists circular + for n in 0..(2 * size) { + if prev[n] == 2 * size { + let bits = (uvs[n] & mask) as usize; + prev[n] = head[bits]; + } } let mut n = 0; let mut i = 0; @@ -87,20 +110,20 @@ impl PoWContext for CuckaroozContext { j = i; let mut k = j; loop { - k = (k + 1) % (2 * self.params.proof_size); + k = prev[k]; if k == i { break; } if uvs[k] == uvs[i] { // find other edge endpoint matching one at i if j != i { - return Err(ErrorKind::Verification("branch in cycle".to_owned()).into()); + return Err(Error::Verification("branch in cycle".to_owned())); } j = k; } } if j == i { - return Err(ErrorKind::Verification("cycle dead ends".to_owned()).into()); + return Err(Error::Verification("cycle dead ends".to_owned())); } i = j ^ 1; n += 1; @@ -111,7 +134,7 @@ impl PoWContext for CuckaroozContext { if n == self.params.proof_size { Ok(()) } else { - Err(ErrorKind::Verification("cycle too short".to_owned()).into()) + Err(Error::Verification("cycle too short".to_owned())) } } } diff --git a/core/src/pow/cuckatoo.rs b/core/src/pow/cuckatoo.rs index b23b34f9ca..013e64145c 100644 --- a/core/src/pow/cuckatoo.rs +++ b/core/src/pow/cuckatoo.rs @@ -14,7 +14,7 @@ //! Implementation of Cuckatoo Cycle designed by John Tromp. use crate::global; use crate::pow::common::{CuckooParams, Link}; -use crate::pow::error::{Error, ErrorKind}; +use crate::pow::error::Error; use crate::pow::{PoWContext, Proof}; use byteorder::{BigEndian, WriteBytesExt}; use croaring::Bitmap; @@ -46,7 +46,7 @@ impl Graph { /// Create a new graph with given parameters pub fn new(max_edges: u64, max_sols: u32, proof_size: usize) -> Result { if max_edges >= u64::max_value() / 2 { - return Err(ErrorKind::Verification("graph is to big to build".to_string()).into()); + return Err(Error::Verification("graph is to big to build".to_string())); } let max_nodes = 2 * max_edges; Ok(Graph { @@ -79,7 +79,7 @@ impl Graph { /// Add an edge to the graph pub fn add_edge(&mut self, u: u64, mut v: u64) -> Result<(), Error> { if u >= self.max_nodes || v >= self.max_nodes { - return Err(ErrorKind::EdgeAddition.into()); + return Err(Error::EdgeAddition); } v = v + self.max_nodes; let adj_u = self.adj_list[(u ^ 1) as usize]; @@ -92,7 +92,7 @@ impl Graph { let ulink = self.links.len() as u64; let vlink = (self.links.len() + 1) as u64; if vlink == self.nil { - return Err(ErrorKind::EdgeAddition.into()); + return Err(Error::EdgeAddition); } self.links.push(Link { next: self.adj_list[u as usize], @@ -246,7 +246,7 @@ impl CuckatooContext { self.verify_impl(&s)?; } if self.graph.solutions.is_empty() { - Err(ErrorKind::NoSolution.into()) + Err(Error::NoSolution) } else { Ok(self.graph.solutions.clone()) } @@ -255,28 +255,56 @@ impl CuckatooContext { /// Verify that given edges are ascending and form a cycle in a header-generated /// graph pub fn verify_impl(&self, proof: &Proof) -> Result<(), Error> { - if proof.proof_size() != global::proofsize() { - return Err(ErrorKind::Verification("wrong cycle length".to_owned()).into()); + let size = proof.proof_size(); + if size != global::proofsize() { + return Err(Error::Verification("wrong cycle length".to_owned())); } let nonces = &proof.nonces; - let mut uvs = vec![0u64; 2 * proof.proof_size()]; - let mut xor0: u64 = (self.params.proof_size as u64 / 2) & 1; + let mut uvs = vec![0u64; 2 * size]; + let mask = u64::MAX >> (size as u64).leading_zeros(); // round size up to 2-power - 1 + let mut xor0: u64 = (size as u64 / 2) & 1; let mut xor1: u64 = xor0; + // the next two arrays form a linked list of nodes with matching 
bits 6..1 + let mut headu = vec![2 * size; 1 + mask as usize]; + let mut headv = vec![2 * size; 1 + mask as usize]; + let mut prev = vec![0usize; 2 * size]; - for n in 0..proof.proof_size() { + for n in 0..size { if nonces[n] > self.params.edge_mask { - return Err(ErrorKind::Verification("edge too big".to_owned()).into()); + return Err(Error::Verification("edge too big".to_owned())); } if n > 0 && nonces[n] <= nonces[n - 1] { - return Err(ErrorKind::Verification("edges not ascending".to_owned()).into()); + return Err(Error::Verification("edges not ascending".to_owned())); } - uvs[2 * n] = self.params.sipnode(nonces[n], 0)?; - uvs[2 * n + 1] = self.params.sipnode(nonces[n], 1)?; - xor0 ^= uvs[2 * n]; - xor1 ^= uvs[2 * n + 1]; + let u = self.params.sipnode(nonces[n], 0)?; + let v = self.params.sipnode(nonces[n], 1)?; + + uvs[2 * n] = u; + let ubits = (u >> 1 & mask) as usize; // larger shifts work too, up to edgebits-6 + prev[2 * n] = headu[ubits]; + headu[ubits] = 2 * n; + + uvs[2 * n + 1] = v; + let vbits = (v >> 1 & mask) as usize; + prev[2 * n + 1] = headv[vbits]; + headv[vbits] = 2 * n + 1; + + xor0 ^= u; + xor1 ^= v; } if xor0 | xor1 != 0 { - return Err(ErrorKind::Verification("endpoints don't match up".to_owned()).into()); + return Err(Error::Verification("endpoints don't match up".to_owned())); + } + // make prev lists circular + for n in 0..size { + if prev[2 * n] == 2 * size { + let ubits = (uvs[2 * n] >> 1 & mask) as usize; + prev[2 * n] = headu[ubits]; + } + if prev[2 * n + 1] == 2 * size { + let vbits = (uvs[2 * n + 1] >> 1 & mask) as usize; + prev[2 * n + 1] = headv[vbits]; + } } let mut n = 0; let mut i = 0; @@ -286,20 +314,20 @@ impl CuckatooContext { j = i; let mut k = j; loop { - k = (k + 2) % (2 * self.params.proof_size); + k = prev[k]; if k == i { break; } if uvs[k] >> 1 == uvs[i] >> 1 { // find other edge endpoint matching one at i if j != i { - return Err(ErrorKind::Verification("branch in cycle".to_owned()).into()); + return Err(Error::Verification("branch in cycle".to_owned())); } j = k; } } if j == i || uvs[j] == uvs[i] { - return Err(ErrorKind::Verification("cycle dead ends".to_owned()).into()); + return Err(Error::Verification("cycle dead ends".to_owned())); } i = j ^ 1; n += 1; @@ -307,10 +335,10 @@ impl CuckatooContext { break; } } - if n == self.params.proof_size { + if n == size { Ok(()) } else { - Err(ErrorKind::Verification("cycle too short".to_owned()).into()) + Err(Error::Verification("cycle too short".to_owned())) } } } @@ -457,13 +485,13 @@ mod test { let mut header = [0u8; 80]; header[0] = 1u8; ctx.set_header_nonce(header.to_vec(), Some(20), false)?; - assert!(!ctx.verify(&Proof::new(V1_29.to_vec())).is_ok()); + assert!(ctx.verify(&Proof::new(V1_29.to_vec())).is_err()); header[0] = 0u8; ctx.set_header_nonce(header.to_vec(), Some(20), false)?; assert!(ctx.verify(&Proof::new(V1_29.to_vec())).is_ok()); let mut bad_proof = V1_29; bad_proof[0] = 0x48a9e1; - assert!(!ctx.verify(&Proof::new(bad_proof.to_vec())).is_ok()); + assert!(ctx.verify(&Proof::new(bad_proof.to_vec())).is_err()); Ok(()) } diff --git a/core/src/pow/error.rs b/core/src/pow/error.rs index a23a6eeeb0..a68ca841a1 100644 --- a/core/src/pow/error.rs +++ b/core/src/pow/error.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,86 +13,37 @@ // limitations under the License. //! 
Cuckatoo specific errors -use failure::{Backtrace, Context, Fail}; -use std::fmt::{self, Display}; -use std::io; /// Cuckatoo solver or validation error -#[derive(Debug)] -pub struct Error { - inner: Context, -} - -#[derive(Clone, Debug, Eq, Fail, PartialEq)] +#[derive(Debug, thiserror::Error)] /// Libwallet error types -pub enum ErrorKind { +pub enum Error { /// Pre POW error - #[fail(display = "POW prepare error: {}", _0)] + #[error("POW prepare error: {0}")] PrePowError(String), /// Verification error - #[fail(display = "POW Verification Error: {}", _0)] + #[error("POW Verification Error: {0}")] Verification(String), /// IO Error - #[fail(display = "POW IO Error")] - IOError, + #[error("POW IO Error, {source:?}")] + IOError { + /// Io Error Convert + #[from] + source: std::io::Error, + }, /// Unexpected Edge Error - #[fail(display = "POW Edge Addition Error")] + #[error("POW Edge Addition Error")] EdgeAddition, /// Path Error - #[fail(display = "POW Path Error")] + #[error("POW Path Error")] Path, /// Invalid cycle - #[fail(display = "POW Invalid Cycle length: {}", _0)] + #[error("POW Invalid Cycle length: {0}")] InvalidCycle(usize), /// No Cycle - #[fail(display = "POW No Cycle")] + #[error("POW No Cycle")] NoCycle, /// No Solution - #[fail(display = "POW No Solution")] + #[error("POW No Solution")] NoSolution, } - -impl Fail for Error { - fn cause(&self) -> Option<&dyn Fail> { - self.inner.cause() - } - - fn backtrace(&self) -> Option<&Backtrace> { - self.inner.backtrace() - } -} - -impl Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - Display::fmt(&self.inner, f) - } -} - -impl Error { - /// Return errorkind - pub fn kind(&self) -> ErrorKind { - self.inner.get_context().clone() - } -} - -impl From for Error { - fn from(kind: ErrorKind) -> Error { - Error { - inner: Context::new(kind), - } - } -} - -impl From> for Error { - fn from(inner: Context) -> Error { - Error { inner } - } -} - -impl From for Error { - fn from(_error: io::Error) -> Error { - Error { - inner: Context::new(ErrorKind::IOError), - } - } -} diff --git a/core/src/pow/siphash.rs b/core/src/pow/siphash.rs index 7e9a5f6303..566f4672f8 100644 --- a/core/src/pow/siphash.rs +++ b/core/src/pow/siphash.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/core/src/pow/types.rs b/core/src/pow/types.rs index 37be40cbb6..6997de9a06 100644 --- a/core/src/pow/types.rs +++ b/core/src/pow/types.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,13 +16,14 @@ use crate::consensus::{graph_weight, MIN_DIFFICULTY, SECOND_POW_EDGE_BITS}; use crate::core::hash::{DefaultHashable, Hashed}; use crate::global; use crate::pow::error::Error; -use crate::ser::{self, Readable, Reader, Writeable, Writer}; +use crate::ser::{self, DeserializationMode, Readable, Reader, Writeable, Writer}; use rand::{thread_rng, Rng}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; /// Types for a Cuck(at)oo proof of work and its encapsulation as a fully usable /// proof of work within a block header. 
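The pow/error.rs rewrite just above is the same mechanical failure-to-thiserror migration applied throughout this diff: an Error struct wrapping Context<ErrorKind>, plus hand-written Fail, Display, and From impls, collapses into a single enum that derives everything. The whole pattern in miniature, with standalone names rather than this crate's (thiserror is the dependency this diff switches to):

```rust
// Before: ~60 lines of failure boilerplate. After:
#[derive(Debug, thiserror::Error)]
enum MyError {
    /// The Display impl comes from the attribute string.
    #[error("POW prepare error: {0}")]
    PrePow(String),
    /// `#[from]` also derives `impl From<std::io::Error> for MyError`.
    #[error("IO error, {source:?}")]
    Io {
        #[from]
        source: std::io::Error,
    },
}

fn read() -> Result<(), MyError> {
    let _ = std::fs::read("config.toml")?; // io::Error auto-converts via #[from]
    Ok(())
}
```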
use std::cmp::{max, min}; use std::ops::{Add, Div, Mul, Sub}; +use std::u64; use std::{fmt, iter}; /// Generic trait for a solver/verifier providing common interface into Cuckoo-family PoW @@ -275,6 +276,12 @@ impl ProofOfWork { } } + /// Maximum unscaled difficulty this proof of work can achieve + pub fn to_unscaled_difficulty(&self) -> Difficulty { + // using scale = 1 gives "unscaled" value + Difficulty::from_num(self.proof.scaled_difficulty(1u64)) + } + /// The edge_bits used for the cuckoo cycle size on this proof pub fn edge_bits(&self) -> u8 { self.proof.edge_bits @@ -303,8 +310,11 @@ impl ProofOfWork { /// /// The hash of the `Proof` is the hash of its packed nonces when serializing /// them at their exact bit size. The resulting bit sequence is padded to be -/// byte-aligned. -/// +/// byte-aligned. We form a PROOFSIZE*edge_bits integer by packing the PROOFSIZE edge +/// indices together, with edge index i occupying bits i * edge_bits through +/// (i+1) * edge_bits - 1, padding it with up to 7 0-bits to a multiple of 8 bits, +/// writing as a little endian byte array, and hashing with blake2b using 256 bit digest. + #[derive(Clone, PartialOrd, PartialEq, Serialize)] pub struct Proof { /// Power of 2 used for the size of the cuckoo graph @@ -348,6 +358,11 @@ impl Proof { } } + /// Number of bytes required to store a proof of given edge bits + pub fn pack_len(bit_width: u8) -> usize { + (bit_width as usize * global::proofsize() + 7) / 8 + } + /// Builds a proof with random POW data, /// needed so that tests that ignore POW /// don't fail due to duplicate hashes @@ -372,6 +387,17 @@ impl Proof { self.nonces.len() } + /// Pack the nonces of the proof to their exact bit size as described above + pub fn pack_nonces(&self) -> Vec<u8> { + let mut compressed = vec![0u8; Proof::pack_len(self.edge_bits)]; + pack_bits( + self.edge_bits, + &self.nonces[0..self.nonces.len()], + &mut compressed, + ); + compressed + } + /// Difficulty achieved by this proof with given scaling factor fn scaled_difficulty(&self, scale: u64) -> u64 { let diff = ((scale as u128) << 64) / (max(1, self.hash().to_u64()) as u128); @@ -379,6 +405,34 @@ impl Proof { } } +/// Pack an array of u64s into `compressed` at the specified bit width. Caller +/// must ensure `compressed` is the right size +fn pack_bits(bit_width: u8, uncompressed: &[u64], mut compressed: &mut [u8]) { + // We will use a `u64` as a mini buffer of 64 bits. + // We accumulate bits in it until capacity, at which point we just copy this + // mini buffer to compressed.
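To make `pack_len` concrete, here is the arithmetic at the usual 42-nonce proof size, as a sketch with the proof size passed explicitly instead of read from `global::proofsize()`:

```rust
/// Same arithmetic as Proof::pack_len above, parameterized for illustration.
fn pack_len(bit_width: u8, proofsize: usize) -> usize {
    (bit_width as usize * proofsize + 7) / 8
}

fn main() {
    assert_eq!(pack_len(29, 42), 153); // 29 * 42 = 1218 bits, padded to 1224
    assert_eq!(pack_len(31, 42), 163); // 31 * 42 = 1302 bits, padded to 1304
}
```

At 29 edge bits that is 153 bytes instead of the 336 that one byte-aligned u64 per nonce would take.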
+ let mut mini_buffer = 0u64; + let mut remaining = 64; + for el in uncompressed { + mini_buffer |= el << (64 - remaining); + if bit_width < remaining { + remaining -= bit_width; + } else { + compressed[..8].copy_from_slice(&mini_buffer.to_le_bytes()); + compressed = &mut compressed[8..]; + mini_buffer = el >> remaining; + remaining = 64 + remaining - bit_width; + } + } + let mut remainder = compressed.len() % 8; + if remainder == 0 { + remainder = 8; + } + if mini_buffer > 0 { + compressed[..].copy_from_slice(&mini_buffer.to_le_bytes()[..remainder]); + } +} + fn extract_bits(bits: &[u8], bit_start: usize, bit_count: usize, read_from: usize) -> u64 { let mut buf: [u8; 8] = [0; 8]; buf.copy_from_slice(&bits[read_from..read_from + 8]); @@ -425,32 +479,37 @@ impl Readable for Proof { } // prepare nonces and read the right number of bytes - let mut nonces = Vec::with_capacity(global::proofsize()); - let nonce_bits = edge_bits as usize; - let bits_len = nonce_bits * global::proofsize(); - let bytes_len = BitVec::bytes_len(bits_len); - if bytes_len < 8 { - return Err(ser::Error::CorruptedData(format!( - "Nonce length {} is too small", - bytes_len - ))); - } - let bits = reader.read_fixed_bytes(bytes_len)?; - - for n in 0..global::proofsize() { - nonces.push(read_number(&bits, n * nonce_bits, nonce_bits)); - } + // If skipping pow proof, we can stop after reading edge bits + if reader.deserialization_mode() != DeserializationMode::SkipPow { + let mut nonces = Vec::with_capacity(global::proofsize()); + let nonce_bits = edge_bits as usize; + let bytes_len = Proof::pack_len(edge_bits); + if bytes_len < 8 { + return Err(ser::Error::CorruptedData(format!( + "Nonce length {} is too small", + bytes_len + ))); + } + let bits = reader.read_fixed_bytes(bytes_len)?; + for n in 0..global::proofsize() { + nonces.push(read_number(&bits, n * nonce_bits, nonce_bits)); + } - //// check the last bits of the last byte are zeroed, we don't use them but - //// still better to enforce to avoid any malleability - let end_of_data = global::proofsize() * nonce_bits; - if read_number(&bits, end_of_data, bytes_len * 8 - end_of_data) != 0 { - return Err(ser::Error::CorruptedData( - "Fail to read nonce as a number".to_string(), - )); + //// check the last bits of the last byte are zeroed, we don't use them but + //// still better to enforce to avoid any malleability + let end_of_data = global::proofsize() * nonce_bits; + if read_number(&bits, end_of_data, bytes_len * 8 - end_of_data) != 0 { + return Err(ser::Error::CorruptedData( + "Fail to read nonce as a number".to_string(), + )); + } + Ok(Proof { edge_bits, nonces }) + } else { + Ok(Proof { + edge_bits, + nonces: vec![], + }) } - - Ok(Proof { edge_bits, nonces }) } } @@ -459,51 +518,14 @@ impl Writeable for Proof { if writer.serialization_mode() != ser::SerializationMode::Hash { writer.write_u8(self.edge_bits)?; } - let nonce_bits = self.edge_bits as usize; - assert!(nonce_bits < 256); - let mut bitvec = BitVec::new(nonce_bits * global::proofsize()); - for (n, nonce) in self.nonces.iter().enumerate() { - for bit in 0..nonce_bits { - if nonce & (1 << bit) != 0 { - bitvec.set_bit_at(n * nonce_bits + (bit as usize)) - } - } - } - // caller suppose to verify the size. Here are are crashing becase it is better than data corruption. 
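A small spot check of `pack_bits` as written above: three 5-bit values occupy 15 bits and land in two bytes, exercising the final-partial-buffer path since the 64-bit mini buffer never fills.

```rust
#[test]
fn pack_bits_tiny_example() {
    // illustrative test, not part of the diff:
    // 1 | 2 << 5 | 3 << 10 = 0b1100_0100_0001 = 0x0C41, written little endian
    let mut packed = vec![0u8; (5 * 3 + 7) / 8];
    pack_bits(5, &[1, 2, 3], &mut packed);
    assert_eq!(packed, vec![0x41, 0x0C]);
}
```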
- // Data will be corrupted because of read that will check fo the size as well - assert!(bitvec.bits.len() <= ser::READ_CHUNK_LIMIT); - writer.write_fixed_bytes(&bitvec.bits)?; - Ok(()) - } -} - -// TODO this could likely be optimized by writing whole bytes (or even words) -// in the `BitVec` at once, dealing with the truncation, instead of bits by bits -struct BitVec { - bits: Vec, -} - -impl BitVec { - /// Number of bytes required to store the provided number of bits - fn bytes_len(bits_len: usize) -> usize { - (bits_len + 7) / 8 - } - - fn new(bits_len: usize) -> BitVec { - BitVec { - bits: vec![0; BitVec::bytes_len(bits_len)], - } - } - - fn set_bit_at(&mut self, pos: usize) { - self.bits[pos / 8] |= 1 << (pos % 8) as u8; + writer.write_fixed_bytes(&self.pack_nonces()) } } #[cfg(test)] mod tests { use super::*; - use crate::ser::{BinReader, BinWriter, ProtocolVersion}; + use crate::ser::{BinReader, BinWriter, DeserializationMode, ProtocolVersion}; use rand::Rng; use std::io::Cursor; @@ -519,7 +541,11 @@ mod tests { panic!("failed to write proof {:?}", e); } buf.set_position(0); - let mut r = BinReader::new(&mut buf, ProtocolVersion::local()); + let mut r = BinReader::new( + &mut buf, + ProtocolVersion::local(), + DeserializationMode::default(), + ); match Proof::read(&mut r) { Err(e) => panic!("failed to read proof: {:?}", e), Ok(p) => assert_eq!(p, proof), diff --git a/core/src/ser.rs b/core/src/ser.rs index 7d590b3abd..fe384ba9bf 100644 --- a/core/src/ser.rs +++ b/core/src/ser.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,13 +22,12 @@ use crate::core::hash::{DefaultHashable, Hash, Hashed}; use crate::global::PROTOCOL_VERSION; use byteorder::{BigEndian, ByteOrder, ReadBytesExt}; +use bytes::Buf; use keychain::{BlindingFactor, Identifier, IDENTIFIER_SIZE}; -use serde::__private::from_utf8_lossy; use std::convert::TryInto; use std::fmt::{self, Debug}; use std::io::{self, Read, Write}; -use std::marker::PhantomData; -use std::{cmp, marker}; +use std::{cmp, marker, string}; use util::secp::constants::{ AGG_SIGNATURE_SIZE, COMPRESSED_PUBLIC_KEY_SIZE, MAX_PROOF_SIZE, PEDERSEN_COMMITMENT_SIZE, SECRET_KEY_SIZE, @@ -36,6 +35,7 @@ use util::secp::constants::{ use util::secp::key::PublicKey; use util::secp::pedersen::{Commitment, RangeProof}; use util::secp::Signature; +use util::secp::{ContextFlag, Secp256k1}; /// Serialization size limit for a single chunk/object or array. /// WARNING!!! You can increase the number, but never decrease @@ -45,10 +45,10 @@ pub const READ_CHUNK_LIMIT: usize = 100_000; pub const READ_VEC_SIZE_LIMIT: u64 = 100_000; /// Possible errors deriving from serializing or deserializing. 
-#[derive(Fail, Clone, Eq, PartialEq, Debug, Serialize, Deserialize)] +#[derive(thiserror::Error, Clone, Eq, PartialEq, Debug, Serialize, Deserialize)] pub enum Error { /// Wraps an io error produced when reading or writing - #[fail(display = "Serialization IO error {}, {:?}", _0, _1)] + #[error("Serialization IO error {0}, {1:?}")] IOErr( String, #[serde( @@ -58,13 +58,10 @@ pub enum Error { io::ErrorKind, ), /// Wraps secp256k1 error - #[fail(display = "Serialization Secp error, {}", _0)] + #[error("Serialization Secp error, {0}")] SecpError(util::secp::Error), /// Expected a given value that wasn't found - #[fail( - display = "Unexpected Data, expected {:?}, got {:?}", - expected, received - )] + #[error("Unexpected Data, expected {expected:?}, got {received:?}")] UnexpectedData { /// What we wanted expected: Vec, @@ -72,34 +69,34 @@ pub enum Error { received: Vec, }, /// Data wasn't in a consumable format - #[fail(display = "Serialization Corrupted data, {}", _0)] + #[error("Serialization Corrupted data, {0}")] CorruptedData(String), /// Incorrect number of elements (when deserializing a vec via read_multi say). - #[fail(display = "Serialization Count error, {}", _0)] + #[error("Serialization Count error, {0}")] CountError(String), /// When asked to read too much data - #[fail(display = "Serialization Too large write, {}", _0)] + #[error("Serialization Too large write, {0}")] TooLargeWriteErr(String), /// When asked to read too much data - #[fail(display = "Serialization Too large read, {}", _0)] + #[error("Serialization Too large read, {0}")] TooLargeReadErr(String), /// Error from from_hex deserialization - #[fail(display = "Serialization Hex error {}", _0)] + #[error("Serialization Hex error {0}")] HexError(String), /// Inputs/outputs/kernels must be sorted lexicographically. - #[fail(display = "Serialization Broken Sort order")] + #[error("Serialization Broken Sort order")] SortError, /// Inputs/outputs/kernels must be unique. - #[fail(display = "Serialization Unexpected Duplicate")] + #[error("Serialization Unexpected Duplicate")] DuplicateError, /// Block header version (hard-fork schedule). - #[fail(display = "Serialization Invalid block version, {}", _0)] + #[error("Serialization Invalid block version, {0}")] InvalidBlockVersion(String), /// utf8 conversion failed - #[fail(display = "UTF8 conversion failed")] + #[error("UTF8 conversion failed")] Utf8Conversion(String), /// Unsupported protocol version - #[fail(display = "unsupported protocol version, {}", _0)] + #[error("unsupported protocol version, {0}")] UnsupportedProtocolVersion(String), } @@ -109,6 +106,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: io::ErrorKind) -> Error { + Error::IOErr(format!("{}", io::Error::from(e)), e) + } +} + impl From for Error { fn from(e: util::secp::Error) -> Error { Error::SecpError(e) @@ -199,9 +202,27 @@ pub trait Writer { } } +/// Signal to a deserializable object how much of its data should be deserialized +#[derive(Copy, Clone, PartialEq, Eq)] +pub enum DeserializationMode { + /// Deserialize everything sufficiently to fully reconstruct the object + Full, + /// For Block Headers, skip reading proof + SkipPow, +} + +impl DeserializationMode { + /// Default deserialization mode + pub fn default() -> Self { + DeserializationMode::Full + } +} + /// Implementations defined how different numbers and binary structures are /// read from an underlying stream or container (depending on implementation). 
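`DeserializationMode` is threaded through every `Reader` (see the trait below), so code that only needs header structure can skip decoding the packed nonces entirely. A hypothetical call site, assuming `BlockHeader` implements `Readable` as in grin:

```rust
use grin_core::core::block::BlockHeader;
use grin_core::ser::{self, DeserializationMode, ProtocolVersion};

// Hypothetical: decode a header but leave `proof.nonces` empty.
fn read_header_skip_pow(mut raw: &[u8]) -> Result<BlockHeader, ser::Error> {
    ser::deserialize(
        &mut raw,
        ProtocolVersion::local(),
        DeserializationMode::SkipPow,
    )
}
```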
pub trait Reader { + /// The mode this reader is reading from + fn deserialization_mode(&self) -> DeserializationMode; /// Read a u8 from the underlying Read fn read_u8(&mut self) -> Result; /// Read a u16 from the underlying Read @@ -295,8 +316,8 @@ where // attempting to read huge amounts of data. // Probably better than checking if count * size overflows a u64 though. // Note!!! Caller on Write responsible to data size checking. - // This issue normally should never happen. If you wee this error, it is mean there are - // datat validation at write method. + // This issue normally should never happen. If you see this error, it is mean there are + // data validation issue at write method. debug_assert!(count <= READ_VEC_SIZE_LIMIT); if count > READ_VEC_SIZE_LIMIT { return Err(Error::TooLargeReadErr(format!( @@ -389,14 +410,19 @@ where pub fn deserialize( source: &mut R, version: ProtocolVersion, + mode: DeserializationMode, ) -> Result { - let mut reader = BinReader::new(source, version); + let mut reader = BinReader::new(source, version, mode); T::read(&mut reader) } /// Deserialize a Readable based on our default "local" protocol version. pub fn deserialize_default(source: &mut R) -> Result { - deserialize(source, ProtocolVersion::local()) + deserialize( + source, + ProtocolVersion::local(), + DeserializationMode::default(), + ) } /// Serializes a Writeable into any std::io::Write implementation. @@ -426,12 +452,17 @@ pub fn ser_vec(thing: &W, version: ProtocolVersion) -> Result { source: &'a mut R, version: ProtocolVersion, + deser_mode: DeserializationMode, } impl<'a, R: Read> BinReader<'a, R> { /// Constructor for a new BinReader for the provided source and protocol version. - pub fn new(source: &'a mut R, version: ProtocolVersion) -> Self { - BinReader { source, version } + pub fn new(source: &'a mut R, version: ProtocolVersion, mode: DeserializationMode) -> Self { + BinReader { + source, + version, + deser_mode: mode, + } } } @@ -442,6 +473,9 @@ fn map_io_err(err: io::Error) -> Error { /// Utility wrapper for an underlying byte Reader. Defines higher level methods /// to read numbers, byte vectors, hashes, etc. impl<'a, R: Read> Reader for BinReader<'a, R> { + fn deserialization_mode(&self) -> DeserializationMode { + self.deser_mode + } fn read_u8(&mut self) -> Result { self.source.read_u8().map_err(map_io_err) } @@ -505,6 +539,7 @@ pub struct StreamingReader<'a> { total_bytes_read: u64, version: ProtocolVersion, stream: &'a mut dyn Read, + deser_mode: DeserializationMode, } impl<'a> StreamingReader<'a> { @@ -515,6 +550,7 @@ impl<'a> StreamingReader<'a> { total_bytes_read: 0, version, stream, + deser_mode: DeserializationMode::Full, } } @@ -526,6 +562,9 @@ impl<'a> StreamingReader<'a> { /// Note: We use read_fixed_bytes() here to ensure our "async" I/O behaves as expected. 
impl<'a> Reader for StreamingReader<'a> { + fn deserialization_mode(&self) -> DeserializationMode { + self.deser_mode + } fn read_u8(&mut self) -> Result { let buf = self.read_fixed_bytes(1)?; Ok(buf[0]) @@ -583,6 +622,118 @@ impl<'a> Reader for StreamingReader<'a> { } } +/// Protocol version-aware wrapper around a `Buf` impl +pub struct BufReader<'a, B: Buf> { + inner: &'a mut B, + version: ProtocolVersion, + bytes_read: usize, + deser_mode: DeserializationMode, +} + +impl<'a, B: Buf> BufReader<'a, B> { + /// Construct a new BufReader + pub fn new(buf: &'a mut B, version: ProtocolVersion) -> Self { + Self { + inner: buf, + version, + bytes_read: 0, + deser_mode: DeserializationMode::Full, + } + } + + /// Check whether the buffer has enough bytes remaining to perform a read + fn has_remaining(&mut self, len: usize) -> Result<(), Error> { + if self.inner.remaining() >= len { + self.bytes_read += len; + Ok(()) + } else { + Err(io::ErrorKind::UnexpectedEof.into()) + } + } + + /// The total bytes read + pub fn bytes_read(&self) -> u64 { + self.bytes_read as u64 + } + + /// Convenience function to read from the buffer and deserialize + pub fn body(&mut self) -> Result { + T::read(self) + } +} + +impl<'a, B: Buf> Reader for BufReader<'a, B> { + fn deserialization_mode(&self) -> DeserializationMode { + self.deser_mode + } + + fn read_u8(&mut self) -> Result { + self.has_remaining(1)?; + Ok(self.inner.get_u8()) + } + + fn read_u16(&mut self) -> Result { + self.has_remaining(2)?; + Ok(self.inner.get_u16()) + } + + fn read_u32(&mut self) -> Result { + self.has_remaining(4)?; + Ok(self.inner.get_u32()) + } + + fn read_u64(&mut self) -> Result { + self.has_remaining(8)?; + Ok(self.inner.get_u64()) + } + + fn read_i32(&mut self) -> Result { + self.has_remaining(4)?; + Ok(self.inner.get_i32()) + } + + fn read_i64(&mut self) -> Result { + self.has_remaining(8)?; + Ok(self.inner.get_i64()) + } + + fn read_bytes_len_prefix(&mut self) -> Result, Error> { + let len = self.read_u64()?; + self.read_fixed_bytes(len as usize) + } + + fn read_fixed_bytes(&mut self, len: usize) -> Result, Error> { + // not reading more than 100k bytes in a single read + if len > 100_000 { + return Err(Error::TooLargeReadErr(format!( + "read unexpected large chunk of {} bytes", + len + ))); + } + self.has_remaining(len)?; + + let mut buf = vec![0; len]; + self.inner.copy_to_slice(&mut buf[..]); + Ok(buf) + } + + fn expect_u8(&mut self, val: u8) -> Result { + let b = self.read_u8()?; + if b == val { + Ok(b) + } else { + Err(Error::UnexpectedData { + expected: vec![val], + received: vec![b], + }) + } + } + + fn protocol_version(&self) -> ProtocolVersion { + self.version + } +} + impl Readable for Commitment { fn read(reader: &mut R) -> Result { let a = reader.read_fixed_bytes(PEDERSEN_COMMITMENT_SIZE)?; @@ -675,7 +826,8 @@ impl Writeable for Signature { impl Writeable for PublicKey { // Write the public key in compressed form fn write(&self, writer: &mut W) -> Result<(), Error> { - writer.write_fixed_bytes(self.serialize_vec(true))?; + let secp = Secp256k1::with_caps(ContextFlag::None); + writer.write_fixed_bytes(self.serialize_vec(&secp, true))?; Ok(()) } } @@ -684,7 +836,8 @@ impl Readable for PublicKey { // Read the public key in compressed form fn read(reader: &mut R) -> Result { let buf = reader.read_fixed_bytes(COMPRESSED_PUBLIC_KEY_SIZE)?; - let pk = PublicKey::from_slice(&buf) + let secp = Secp256k1::with_caps(ContextFlag::None); + let pk = PublicKey::from_slice(&secp, &buf) .map_err(|e| Error::CorruptedData(format!("Unable 
to read public key, {}", e)))?; Ok(pk) } @@ -1002,8 +1155,8 @@ where struct FieldVisitor; impl<'de> serde::de::Visitor<'de> for FieldVisitor { type Value = Field; - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - std::fmt::Formatter::write_str(formatter, "variant identifier") + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + fmt::Formatter::write_str(formatter, "variant identifier") } fn visit_u64(self, value: u64) -> Result where @@ -1084,7 +1237,7 @@ where b"Other" => Ok(Field::field16), b"UnexpectedEof" => Ok(Field::field17), _ => { - let value = &from_utf8_lossy(value); + let value = &string::String::from_utf8_lossy(value); Err(serde::de::Error::unknown_variant(value, VARIANTS)) } } @@ -1100,13 +1253,13 @@ where } } struct Visitor<'de> { - marker: PhantomData, - lifetime: PhantomData<&'de ()>, + marker: marker::PhantomData, + lifetime: marker::PhantomData<&'de ()>, } impl<'de> serde::de::Visitor<'de> for Visitor<'de> { type Value = io::ErrorKind; - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - std::fmt::Formatter::write_str(formatter, "enum io::ErrorKind") + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + fmt::Formatter::write_str(formatter, "enum io::ErrorKind") } fn visit_enum(self, data: A) -> Result where @@ -1308,8 +1461,8 @@ where "ErrorKind", VARIANTS, Visitor { - marker: PhantomData::, - lifetime: PhantomData, + marker: marker::PhantomData::, + lifetime: marker::PhantomData, }, ) } diff --git a/core/tests/block.rs b/core/tests/block.rs index 2be6d1bf63..4878511b98 100644 --- a/core/tests/block.rs +++ b/core/tests/block.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
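The new `BufReader` above lets callers deserialize directly out of any `bytes::Buf` without first copying into an `io::Read`. A usage sketch, with the decoded type left generic:

```rust
use bytes::Bytes;
use grin_core::ser::{self, BufReader, ProtocolVersion, Readable};

// Sketch: decode any Readable straight from an in-memory buffer.
fn decode<T: Readable>(payload: Vec<u8>) -> Result<T, ser::Error> {
    let mut buf = Bytes::from(payload);
    let mut reader = BufReader::new(&mut buf, ProtocolVersion::local());
    reader.body()
}
```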
@@ -14,14 +14,14 @@ mod common; use crate::common::{new_block, tx1i2o, tx2i1o, txspend1i1o}; -use crate::core::consensus::{self, BLOCK_OUTPUT_WEIGHT}; +use crate::core::consensus::{self, OUTPUT_WEIGHT}; use crate::core::core::block::{Block, BlockHeader, Error, HeaderVersion, UntrustedBlockHeader}; use crate::core::core::hash::Hashed; use crate::core::core::id::ShortIdentifiable; use crate::core::core::transaction::{ - self, KernelFeatures, NRDRelativeHeight, Output, OutputFeatures, OutputIdentifier, Transaction, + self, FeeFields, KernelFeatures, NRDRelativeHeight, Output, OutputFeatures, OutputIdentifier, + Transaction, }; -use crate::core::core::verifier_cache::{LruVerifierCache, VerifierCache}; use crate::core::core::{Committed, CompactBlock}; use crate::core::libtx::build::{self, input, output}; use crate::core::libtx::ProofBuilder; @@ -29,8 +29,7 @@ use crate::core::{global, pow, ser}; use chrono::Duration; use grin_core as core; use keychain::{BlindingFactor, ExtKeychain, Keychain}; -use std::sync::Arc; -use util::{secp, RwLock, ToHex}; +use util::{secp, ToHex}; // Setup test with AutomatedTesting chain_type; fn test_setup() { @@ -38,16 +37,12 @@ fn test_setup() { global::set_local_chain_type(global::ChainTypes::AutomatedTesting); } -fn verifier_cache() -> Arc> { - Arc::new(RwLock::new(LruVerifierCache::new())) -} - #[test] fn too_large_block() { test_setup(); let keychain = ExtKeychain::from_random_seed(false).unwrap(); let builder = ProofBuilder::new(&keychain); - let max_out = global::max_block_weight() / BLOCK_OUTPUT_WEIGHT; + let max_out = global::max_block_weight() / OUTPUT_WEIGHT; let mut pks = vec![]; for n in 0..(max_out + 1) { @@ -61,7 +56,7 @@ fn too_large_block() { parts.append(&mut vec![input(500000, pks.pop().unwrap())]); let tx = build::transaction( - KernelFeatures::Plain { fee: 2 }, + KernelFeatures::Plain { fee: 2.into() }, &parts, &keychain, &builder, @@ -71,9 +66,7 @@ fn too_large_block() { let prev = BlockHeader::default(); let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); let b = new_block(&[tx], &keychain, &builder, &prev, &key_id); - assert!(b - .validate(&BlindingFactor::zero(), verifier_cache()) - .is_err()); + assert!(b.validate(&BlindingFactor::zero()).is_err()); } #[test] @@ -95,7 +88,6 @@ fn block_with_nrd_kernel_pre_post_hf3() { // Enable the global NRD feature flag. NRD kernels valid at HF3 at height 9. global::set_local_chain_type(global::ChainTypes::AutomatedTesting); global::set_local_nrd_enabled(true); - let keychain = ExtKeychain::from_random_seed(false).unwrap(); let builder = ProofBuilder::new(&keychain); let key_id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); @@ -103,7 +95,7 @@ fn block_with_nrd_kernel_pre_post_hf3() { let tx = build::transaction( KernelFeatures::NoRecentDuplicate { - fee: 2, + fee: 2.into(), relative_height: NRDRelativeHeight::new(1440).unwrap(), }, &[input(7, key_id1), output(5, key_id2)], @@ -130,7 +122,7 @@ fn block_with_nrd_kernel_pre_post_hf3() { // Block is invalid at header version 3 if it contains an NRD kernel. assert_eq!(b.header.version, HeaderVersion(3)); assert_eq!( - b.validate(&BlindingFactor::zero(), verifier_cache()), + b.validate(&BlindingFactor::zero()), Err(Error::NRDKernelPreHF3) ); @@ -151,9 +143,7 @@ fn block_with_nrd_kernel_pre_post_hf3() { // Block is valid at header version 4 (at HF height) if it contains an NRD kernel. 
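From here on, the dominant change in these tests is the removal of the shared verifier cache: `Block::validate` drops its `Arc<RwLock<LruVerifierCache>>` argument, so every call site loses the second parameter and the local `verifier_cache()` helper goes away. The new call shape:

```rust
use grin_core::core::Block;
use keychain::BlindingFactor;

// Sketch of the one-argument form used throughout the updated tests.
fn check_block(b: &Block) -> Result<(), grin_core::core::block::Error> {
    // discard validate's success value; the tests only assert Ok/Err
    b.validate(&BlindingFactor::zero()).map(|_| ())
}
```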
assert_eq!(b.header.height, consensus::TESTING_THIRD_HARD_FORK); assert_eq!(b.header.version, HeaderVersion(4)); - assert!(b - .validate(&BlindingFactor::zero(), verifier_cache()) - .is_ok()); + assert!(b.validate(&BlindingFactor::zero()).is_ok()); let prev_height = consensus::TESTING_THIRD_HARD_FORK; let prev = BlockHeader { @@ -171,9 +161,7 @@ fn block_with_nrd_kernel_pre_post_hf3() { // Block is valid at header version 4 if it contains an NRD kernel. assert_eq!(b.header.version, HeaderVersion(4)); - assert!(b - .validate(&BlindingFactor::zero(), verifier_cache()) - .is_ok()); + assert!(b.validate(&BlindingFactor::zero()).is_ok()); } #[test] @@ -188,7 +176,7 @@ fn block_with_nrd_kernel_nrd_not_enabled() { let tx = build::transaction( KernelFeatures::NoRecentDuplicate { - fee: 2, + fee: 2.into(), relative_height: NRDRelativeHeight::new(1440).unwrap(), }, &[input(7, key_id1), output(5, key_id2)], @@ -216,7 +204,7 @@ fn block_with_nrd_kernel_nrd_not_enabled() { // Block is invalid as NRD not enabled. assert_eq!(b.header.version, HeaderVersion(3)); assert_eq!( - b.validate(&BlindingFactor::zero(), verifier_cache()), + b.validate(&BlindingFactor::zero()), Err(Error::NRDKernelNotEnabled) ); @@ -238,7 +226,7 @@ fn block_with_nrd_kernel_nrd_not_enabled() { assert_eq!(b.header.height, consensus::TESTING_THIRD_HARD_FORK); assert_eq!(b.header.version, HeaderVersion(4)); assert_eq!( - b.validate(&BlindingFactor::zero(), verifier_cache()), + b.validate(&BlindingFactor::zero()), Err(Error::NRDKernelNotEnabled) ); @@ -259,7 +247,7 @@ fn block_with_nrd_kernel_nrd_not_enabled() { // Block is invalid as NRD not enabled. assert_eq!(b.header.version, HeaderVersion(4)); assert_eq!( - b.validate(&BlindingFactor::zero(), verifier_cache()), + b.validate(&BlindingFactor::zero()), Err(Error::NRDKernelNotEnabled) ); } @@ -276,7 +264,7 @@ fn block_with_cut_through() { let btx1 = tx2i1o(); let btx2 = build::transaction( - KernelFeatures::Plain { fee: 2 }, + KernelFeatures::Plain { fee: 2.into() }, &[input(7, key_id1), output(5, key_id2.clone())], &keychain, &builder, @@ -292,8 +280,7 @@ fn block_with_cut_through() { // block should have been automatically compacted (including reward // output) and should still be valid - b.validate(&BlindingFactor::zero(), verifier_cache()) - .unwrap(); + b.validate(&BlindingFactor::zero()).unwrap(); assert_eq!(b.inputs().len(), 3); assert_eq!(b.outputs().len(), 3); } @@ -329,9 +316,7 @@ fn empty_block_with_coinbase_is_valid() { // the block should be valid here (single coinbase output with corresponding // txn kernel) - assert!(b - .validate(&BlindingFactor::zero(), verifier_cache()) - .is_ok()); + assert!(b.validate(&BlindingFactor::zero()).is_ok()); } #[test] @@ -357,7 +342,7 @@ fn remove_coinbase_output_flag() { .verify_kernel_sums(b.header.overage(), b.header.total_kernel_offset()) .is_ok()); assert_eq!( - b.validate(&BlindingFactor::zero(), verifier_cache()), + b.validate(&BlindingFactor::zero()), Err(Error::CoinbaseSumMismatch) ); } @@ -374,7 +359,9 @@ fn remove_coinbase_kernel_flag() { let mut b = new_block(&[], &keychain, &builder, &prev, &key_id); let mut kernel = b.kernels()[0].clone(); - kernel.features = KernelFeatures::Plain { fee: 0 }; + kernel.features = KernelFeatures::Plain { + fee: FeeFields::zero(), + }; b.body = b.body.replace_kernel(kernel); // Flipping the coinbase flag results in kernels not summing correctly. 
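Alongside the cache removal, every bare integer fee becomes a `FeeFields` value: non-zero literals go through `Into` (`fee: 2.into()`), and `FeeFields::zero()` replaces the old `fee: 0`, as in `remove_coinbase_kernel_flag` just above. Both forms together:

```rust
use grin_core::core::{FeeFields, KernelFeatures};

// Sketch of the two conversion styles used by the updated tests.
fn example_features() -> (KernelFeatures, KernelFeatures) {
    let plain = KernelFeatures::Plain { fee: 2.into() };
    let zero = KernelFeatures::Plain {
        fee: FeeFields::zero(),
    };
    (plain, zero)
}
```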
@@ -386,7 +373,7 @@ fn remove_coinbase_kernel_flag() { // Also results in the block no longer validating correctly // because the message being signed on each tx kernel includes the kernel features. assert_eq!( - b.validate(&BlindingFactor::zero(), verifier_cache()), + b.validate(&BlindingFactor::zero()), Err(Error::Transaction(transaction::Error::IncorrectSignature)) ); } @@ -762,7 +749,7 @@ fn same_amount_outputs_copy_range_proof() { let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0); let tx = build::transaction( - KernelFeatures::Plain { fee: 1 }, + KernelFeatures::Plain { fee: 1.into() }, &[input(7, key_id1), output(3, key_id2), output(3, key_id3)], &keychain, &builder, @@ -786,7 +773,7 @@ fn same_amount_outputs_copy_range_proof() { // block should have been automatically compacted (including reward // output) and should still be valid - match b.validate(&BlindingFactor::zero(), verifier_cache()) { + match b.validate(&BlindingFactor::zero()) { Err(Error::Transaction(transaction::Error::Secp(secp::Error::InvalidRangeProof))) => {} _ => panic!("Bad range proof should be invalid"), } @@ -803,7 +790,7 @@ fn wrong_amount_range_proof() { let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0); let tx1 = build::transaction( - KernelFeatures::Plain { fee: 1 }, + KernelFeatures::Plain { fee: 1.into() }, &[ input(7, key_id1.clone()), output(3, key_id2.clone()), @@ -814,7 +801,7 @@ fn wrong_amount_range_proof() { ) .unwrap(); let tx2 = build::transaction( - KernelFeatures::Plain { fee: 1 }, + KernelFeatures::Plain { fee: 1.into() }, &[input(7, key_id1), output(2, key_id2), output(4, key_id3)], &keychain, &builder, @@ -838,7 +825,7 @@ fn wrong_amount_range_proof() { // block should have been automatically compacted (including reward // output) and should still be valid - match b.validate(&BlindingFactor::zero(), verifier_cache()) { + match b.validate(&BlindingFactor::zero()) { Err(Error::Transaction(transaction::Error::Secp(secp::Error::InvalidRangeProof))) => {} _ => panic!("Bad range proof should be invalid"), } @@ -894,7 +881,9 @@ fn test_verify_cut_through_plain() -> Result<(), Error> { let builder = ProofBuilder::new(&keychain); let tx = build::transaction( - KernelFeatures::Plain { fee: 0 }, + KernelFeatures::Plain { + fee: FeeFields::zero(), + }, &[ build::input(10, key_id1.clone()), build::input(10, key_id2.clone()), @@ -913,7 +902,7 @@ fn test_verify_cut_through_plain() -> Result<(), Error> { // The block should fail validation due to cut-through. assert_eq!( - block.validate(&BlindingFactor::zero(), verifier_cache()), + block.validate(&BlindingFactor::zero()), Err(Error::Transaction(transaction::Error::CutThrough)) ); @@ -934,7 +923,7 @@ fn test_verify_cut_through_plain() -> Result<(), Error> { .replace_outputs(outputs); // Block validates successfully after applying cut-through. - block.validate(&BlindingFactor::zero(), verifier_cache())?; + block.validate(&BlindingFactor::zero())?; // Block validates via lightweight "read" validation. 
block.validate_read()?; @@ -958,7 +947,9 @@ fn test_verify_cut_through_coinbase() -> Result<(), Error> { let builder = ProofBuilder::new(&keychain); let tx = build::transaction( - KernelFeatures::Plain { fee: 0 }, + KernelFeatures::Plain { + fee: FeeFields::zero(), + }, &[ build::coinbase_input(consensus::MWC_FIRST_GROUP_REWARD, key_id1.clone()), build::coinbase_input(consensus::MWC_FIRST_GROUP_REWARD, key_id2.clone()), @@ -980,7 +971,7 @@ fn test_verify_cut_through_coinbase() -> Result<(), Error> { // The block should fail validation due to cut-through. assert_eq!( - block.validate(&BlindingFactor::zero(), verifier_cache()), + block.validate(&BlindingFactor::zero()), Err(Error::Transaction(transaction::Error::CutThrough)) ); @@ -1001,7 +992,7 @@ fn test_verify_cut_through_coinbase() -> Result<(), Error> { .replace_outputs(outputs); // Block validates successfully after applying cut-through. - block.validate(&BlindingFactor::zero(), verifier_cache())?; + block.validate(&BlindingFactor::zero())?; // Block validates via lightweight "read" validation. block.validate_read()?; diff --git a/core/tests/common.rs b/core/tests/common.rs index b7e78218f3..16f89dbe3c 100644 --- a/core/tests/common.rs +++ b/core/tests/common.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ pub fn tx2i1o() -> Transaction { let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0); let tx = build::transaction( - KernelFeatures::Plain { fee: 2 }, + KernelFeatures::Plain { fee: 2.into() }, &[input(10, key_id1), input(11, key_id2), output(19, key_id3)], &keychain, &builder, @@ -56,7 +56,7 @@ pub fn tx1i1o() -> Transaction { let key_id2 = keychain::ExtKeychain::derive_key_id(1, 2, 0, 0, 0); let tx = build::transaction( - KernelFeatures::Plain { fee: 2 }, + KernelFeatures::Plain { fee: 2.into() }, &[input(5, key_id1), output(3, key_id2)], &keychain, &builder, @@ -96,7 +96,7 @@ pub fn tx1i2o() -> Transaction { let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0); let tx = build::transaction( - KernelFeatures::Plain { fee: 2 }, + KernelFeatures::Plain { fee: 2.into() }, &[input(6, key_id1), output(3, key_id2), output(1, key_id3)], &keychain, &builder, @@ -120,7 +120,10 @@ where K: Keychain, B: ProofBuild, { - let fees = txs.iter().map(|tx| tx.fee()).sum(); + let fees = txs + .iter() + .map(|tx| tx.fee(previous_header.height + 1)) + .sum(); let reward_output = reward::output( keychain, builder, @@ -148,7 +151,7 @@ where B: ProofBuild, { build::transaction( - KernelFeatures::Plain { fee: 2 }, + KernelFeatures::Plain { fee: 2.into() }, &[input(v, key_id1), output(3, key_id2)], keychain, builder, diff --git a/core/tests/consensus_automated.rs b/core/tests/consensus_automated.rs index 037c071754..802a41baee 100644 --- a/core/tests/consensus_automated.rs +++ b/core/tests/consensus_automated.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at @@ -13,8 +13,8 @@ use chrono::Utc; use grin_core::consensus::{ - next_difficulty, HeaderInfo, AR_SCALE_DAMP_FACTOR, BLOCK_TIME_SEC, DIFFICULTY_ADJUST_WINDOW, - MIN_DIFFICULTY, + next_difficulty, HeaderDifficultyInfo, AR_SCALE_DAMP_FACTOR, BLOCK_TIME_SEC, + DIFFICULTY_ADJUST_WINDOW, MIN_DIFFICULTY, }; use grin_core::global; use grin_core::pow::Difficulty; @@ -27,7 +27,7 @@ fn next_target_adjustment() { let diff_min = Difficulty::min(); // Check we don't get stuck on difficulty <= MIN_DIFFICULTY (at 4x faster blocks at least) - let mut hi = HeaderInfo::from_diff_scaling(diff_min, AR_SCALE_DAMP_FACTOR as u32); + let mut hi = HeaderDifficultyInfo::from_diff_scaling(diff_min, AR_SCALE_DAMP_FACTOR as u32); hi.is_secondary = false; let hinext = next_difficulty( 1, @@ -54,7 +54,11 @@ fn next_target_adjustment() { // check pre difficulty_data_to_vector effect on retargetting assert_eq!( - next_difficulty(1, vec![HeaderInfo::from_ts_diff(42, hi.difficulty)]).difficulty, + next_difficulty( + 1, + vec![HeaderDifficultyInfo::from_ts_diff(42, hi.difficulty)] + ) + .difficulty, Difficulty::from_num(14913) ); @@ -63,10 +67,10 @@ fn next_target_adjustment() { let sec = DIFFICULTY_ADJUST_WINDOW / 2; let mut s1 = repeat(BLOCK_TIME_SEC, hi.clone(), sec, Some(cur_time)); let mut s2 = repeat_offs( - cur_time + (sec * BLOCK_TIME_SEC) as u64, BLOCK_TIME_SEC, 1500, - DIFFICULTY_ADJUST_WINDOW / 2, + sec, + cur_time + (sec * BLOCK_TIME_SEC) as u64, ); s2.append(&mut s1); assert_eq!( @@ -125,7 +129,12 @@ fn next_target_adjustment() { // Builds an iterator for next difficulty calculation with the provided // constant time interval, difficulty and total length. -fn repeat(interval: u64, diff: HeaderInfo, len: u64, cur_time: Option) -> Vec { +fn repeat( + interval: u64, + diff: HeaderDifficultyInfo, + len: u64, + cur_time: Option, +) -> Vec { let cur_time = match cur_time { Some(t) => t, None => Utc::now().timestamp() as u64, @@ -137,8 +146,8 @@ fn repeat(interval: u64, diff: HeaderInfo, len: u64, cur_time: Option) -> V let pairs = times.zip(diffs.iter()); pairs .map(|(t, d)| { - HeaderInfo::new( - diff.block_hash, + HeaderDifficultyInfo::new( + None, cur_time + t as u64, *d, diff.secondary_scaling, @@ -148,10 +157,10 @@ fn repeat(interval: u64, diff: HeaderInfo, len: u64, cur_time: Option) -> V .collect::>() } -fn repeat_offs(from: u64, interval: u64, diff: u64, len: u64) -> Vec { +fn repeat_offs(interval: u64, diff: u64, len: u64, from: u64) -> Vec { repeat( interval, - HeaderInfo::from_ts_diff(1, Difficulty::from_num(diff)), + HeaderDifficultyInfo::from_ts_diff(1, Difficulty::from_num(diff)), len, Some(from), ) diff --git a/core/tests/consensus_floonet.rs b/core/tests/consensus_floonet.rs deleted file mode 100644 index 8b13789179..0000000000 --- a/core/tests/consensus_floonet.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/core/tests/consensus_mainnet.rs b/core/tests/consensus_mainnet.rs index 80c1ba89f6..a18c5913b2 100644 --- a/core/tests/consensus_mainnet.rs +++ b/core/tests/consensus_mainnet.rs @@ -1,4 +1,4 @@ -// Copyright 2019 The Grin Developers +// Copyright 2021 The Grin Developers // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -77,7 +77,12 @@ impl Display for DiffBlock { // Builds an iterator for next difficulty calculation with the provided // constant time interval, difficulty and total length. 
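In these difficulty tests, `HeaderInfo` becomes `HeaderDifficultyInfo` and the cached block hash turns optional, so the simulation helpers pass `None` instead of fabricating a hash. A sketch of the constructor shape implied by the call sites (argument names are assumptions read off this diff):

```rust
use grin_core::consensus::HeaderDifficultyInfo;
use grin_core::pow::Difficulty;

// Assumed signature, reconstructed from the call sites above.
fn simulated_header_info(
    timestamp: u64,
    difficulty: Difficulty,
    secondary_scaling: u32,
    is_secondary: bool,
) -> HeaderDifficultyInfo {
    HeaderDifficultyInfo::new(None, timestamp, difficulty, secondary_scaling, is_secondary)
}
```

Note also that `repeat_offs` in consensus_automated.rs reorders its parameters to (interval, diff, len, from), while the mainnet copy below keeps the old (from, interval, diff, len) order; each file's call sites are updated to match.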
-fn repeat(interval: u64, diff: HeaderInfo, len: u64, cur_time: Option) -> Vec { +fn repeat( + interval: u64, + diff: HeaderDifficultyInfo, + len: u64, + cur_time: Option, +) -> Vec { let cur_time = match cur_time { Some(t) => t, None => Utc::now().timestamp() as u64, @@ -89,8 +94,8 @@ fn repeat(interval: u64, diff: HeaderInfo, len: u64, cur_time: Option) -> V let pairs = times.zip(diffs.iter()); pairs .map(|(t, d)| { - HeaderInfo::new( - diff.block_hash, + HeaderDifficultyInfo::new( + diff.hash, cur_time + t as u64, d.clone(), diff.secondary_scaling, @@ -101,27 +106,31 @@ fn repeat(interval: u64, diff: HeaderInfo, len: u64, cur_time: Option) -> V } // Creates a new chain with a genesis at a simulated difficulty -fn create_chain_sim(diff: u64) -> Vec<(HeaderInfo, DiffStats)> { +fn create_chain_sim(diff: u64) -> Vec<(HeaderDifficultyInfo, DiffStats)> { println!( "adding create: {}, {}", Utc::now().timestamp(), Difficulty::from_num(diff) ); - let return_vec = vec![HeaderInfo::from_ts_diff( + let return_vec = vec![HeaderDifficultyInfo::from_ts_diff( Utc::now().timestamp() as u64, Difficulty::from_num(diff), )]; let diff_stats = get_diff_stats(&return_vec); vec![( - HeaderInfo::from_ts_diff(Utc::now().timestamp() as u64, Difficulty::from_num(diff)), + HeaderDifficultyInfo::from_ts_diff( + Utc::now().timestamp() as u64, + Difficulty::from_num(diff), + ), diff_stats, )] } -fn get_diff_stats(chain_sim: &[HeaderInfo]) -> DiffStats { +fn get_diff_stats(chain_sim: &[HeaderDifficultyInfo]) -> DiffStats { // Fill out some difficulty stats for convenience let diff_iter = chain_sim.to_vec(); - let last_blocks: Vec = global::difficulty_data_to_vector(diff_iter.iter().cloned()); + let last_blocks: Vec = + global::difficulty_data_to_vector(diff_iter.iter().cloned()); let mut last_time = last_blocks[0].timestamp; let tip_height = chain_sim.len(); @@ -132,10 +141,11 @@ fn get_diff_stats(chain_sim: &[HeaderInfo]) -> DiffStats { let mut i = 1; - let sum_blocks: Vec = global::difficulty_data_to_vector(diff_iter.iter().cloned()) - .into_iter() - .take(DIFFICULTY_ADJUST_WINDOW as usize) - .collect(); + let sum_blocks: Vec = + global::difficulty_data_to_vector(diff_iter.iter().cloned()) + .into_iter() + .take(DIFFICULTY_ADJUST_WINDOW as usize) + .collect(); let sum_entries: Vec = sum_blocks .iter() @@ -195,19 +205,23 @@ fn get_diff_stats(chain_sim: &[HeaderInfo]) -> DiffStats { // from the difficulty adjustment at interval seconds from the previous block fn add_block( interval: u64, - chain_sim: Vec<(HeaderInfo, DiffStats)>, -) -> Vec<(HeaderInfo, DiffStats)> { + chain_sim: Vec<(HeaderDifficultyInfo, DiffStats)>, +) -> Vec<(HeaderDifficultyInfo, DiffStats)> { let mut ret_chain_sim = chain_sim.clone(); - let mut return_chain: Vec = chain_sim.clone().iter().map(|e| e.0.clone()).collect(); + let mut return_chain: Vec = + chain_sim.clone().iter().map(|e| e.0.clone()).collect(); // get last interval let diff = next_difficulty(1, return_chain.clone()); let last_elem = chain_sim.first().unwrap().clone().0; let time = last_elem.timestamp + interval; - return_chain.insert(0, HeaderInfo::from_ts_diff(time, diff.difficulty)); + return_chain.insert(0, HeaderDifficultyInfo::from_ts_diff(time, diff.difficulty)); let diff_stats = get_diff_stats(&return_chain); ret_chain_sim.insert( 0, - (HeaderInfo::from_ts_diff(time, diff.difficulty), diff_stats), + ( + HeaderDifficultyInfo::from_ts_diff(time, diff.difficulty), + diff_stats, + ), ); ret_chain_sim } @@ -215,9 +229,9 @@ fn add_block( // Adds another n 'blocks' to the iterator, 
with difficulty calculated fn add_block_repeated( interval: u64, - chain_sim: Vec<(HeaderInfo, DiffStats)>, + chain_sim: Vec<(HeaderDifficultyInfo, DiffStats)>, iterations: usize, -) -> Vec<(HeaderInfo, DiffStats)> { +) -> Vec<(HeaderDifficultyInfo, DiffStats)> { let mut return_chain = chain_sim; for _ in 0..iterations { return_chain = add_block(interval, return_chain.clone()); @@ -227,7 +241,7 @@ fn add_block_repeated( // Prints the contents of the iterator and its difficulties.. useful for // tweaking -fn print_chain_sim(chain_sim: Vec<(HeaderInfo, DiffStats)>) { +fn print_chain_sim(chain_sim: Vec<(HeaderDifficultyInfo, DiffStats)>) { let mut chain_sim = chain_sim; chain_sim.reverse(); let mut last_time = 0; @@ -267,10 +281,10 @@ fn print_chain_sim(chain_sim: Vec<(HeaderInfo, DiffStats)>) { }); } -fn repeat_offs(from: u64, interval: u64, diff: u64, len: u64) -> Vec { +fn repeat_offs(from: u64, interval: u64, diff: u64, len: u64) -> Vec { repeat( interval, - HeaderInfo::from_ts_diff(1, Difficulty::from_num(diff)), + HeaderDifficultyInfo::from_ts_diff(1, Difficulty::from_num(diff)), len, Some(from), ) @@ -357,7 +371,7 @@ fn next_target_adjustment() { let diff_min = Difficulty::min(); // Check we don't get stuck on difficulty <= MIN_DIFFICULTY (at 4x faster blocks at least) - let mut hi = HeaderInfo::from_diff_scaling(diff_min, AR_SCALE_DAMP_FACTOR as u32); + let mut hi = HeaderDifficultyInfo::from_diff_scaling(diff_min, AR_SCALE_DAMP_FACTOR as u32); hi.is_secondary = false; let hinext = next_difficulty( 1, @@ -384,7 +398,11 @@ fn next_target_adjustment() { // check pre difficulty_data_to_vector effect on retargetting assert_eq!( - next_difficulty(1, vec![HeaderInfo::from_ts_diff(42, hi.difficulty)]).difficulty, + next_difficulty( + 1, + vec![HeaderDifficultyInfo::from_ts_diff(42, hi.difficulty)] + ) + .difficulty, Difficulty::from_num(14913) ); @@ -540,7 +558,7 @@ fn test_secondary_pow_scale() { global::set_local_chain_type(global::ChainTypes::Mainnet); let window = DIFFICULTY_ADJUST_WINDOW; - let mut hi = HeaderInfo::from_diff_scaling(Difficulty::from_num(10), 100); + let mut hi = HeaderDifficultyInfo::from_diff_scaling(Difficulty::from_num(10), 100); // all primary, factor should increase so it becomes easier to find a high // difficulty block @@ -564,7 +582,8 @@ fn test_secondary_pow_scale() { 13 ); // same as above, testing lowest bound - let mut low_hi = HeaderInfo::from_diff_scaling(Difficulty::from_num(10), MIN_AR_SCALE as u32); + let mut low_hi = + HeaderDifficultyInfo::from_diff_scaling(Difficulty::from_num(10), MIN_AR_SCALE as u32); low_hi.is_secondary = true; assert_eq!( secondary_pow_scaling( @@ -574,7 +593,7 @@ fn test_secondary_pow_scale() { MIN_AR_SCALE as u32 ); // the right ratio of 95% secondary - let mut primary_hi = HeaderInfo::from_diff_scaling(Difficulty::from_num(10), 50); + let mut primary_hi = HeaderDifficultyInfo::from_diff_scaling(Difficulty::from_num(10), 50); primary_hi.is_secondary = false; assert_eq!( secondary_pow_scaling( diff --git a/core/tests/core.rs b/core/tests/core.rs index 6a5680c46b..0d1b49ad92 100644 --- a/core/tests/core.rs +++ b/core/tests/core.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -19,10 +19,9 @@ pub mod common; use self::core::core::block::BlockHeader; use self::core::core::block::Error::KernelLockHeight; use self::core::core::hash::{Hashed, ZERO_HASH}; -use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache}; use self::core::core::{ - aggregate, deaggregate, KernelFeatures, Output, OutputFeatures, OutputIdentifier, Transaction, - TxKernel, Weighting, + aggregate, deaggregate, FeeFields, KernelFeatures, Output, OutputFeatures, OutputIdentifier, + Transaction, TxKernel, Weighting, }; use self::core::libtx::build::{self, initial_tx, input, output, with_excess}; use self::core::libtx::{aggsig, ProofBuilder}; @@ -30,9 +29,7 @@ use self::core::{global, ser}; use crate::common::{new_block, tx1i1o, tx1i2o, tx2i1o}; use grin_core as core; use keychain::{BlindingFactor, ExtKeychain, Keychain}; -use std::sync::Arc; use util::static_secp_instance; -use util::RwLock; // Setup test with AutomatedTesting chain_type; fn test_setup() { @@ -97,7 +94,8 @@ fn simple_tx_ser_deser() { let mut vec = Vec::new(); ser::serialize_default(&mut vec, &tx).expect("serialization failed"); let dtx: Transaction = ser::deserialize_default(&mut &vec[..]).unwrap(); - assert_eq!(dtx.fee(), 2); + let height = 42; // arbitrary + assert_eq!(dtx.fee(height), 2); assert_eq!(dtx.inputs().len(), 2); assert_eq!(dtx.outputs().len(), 1); assert_eq!(tx.hash(), dtx.hash()); @@ -130,7 +128,9 @@ fn test_zero_commit_fails() { // blinding should fail as signing with a zero r*G shouldn't work let res = build::transaction( - KernelFeatures::Plain { fee: 0 }, + KernelFeatures::Plain { + fee: FeeFields::zero(), + }, &[input(10, key_id1.clone()), output(10, key_id1)], &keychain, &builder, @@ -138,10 +138,6 @@ fn test_zero_commit_fails() { assert!(res.is_err()); } -fn verifier_cache() -> Arc> { - Arc::new(RwLock::new(LruVerifierCache::new())) -} - #[test] fn build_tx_kernel() { test_setup(); @@ -153,7 +149,7 @@ fn build_tx_kernel() { // first build a valid tx with corresponding blinding factor let tx = build::transaction( - KernelFeatures::Plain { fee: 2 }, + KernelFeatures::Plain { fee: 2.into() }, &[input(10, key_id1), output(5, key_id2), output(3, key_id3)], &keychain, &builder, @@ -161,16 +157,16 @@ fn build_tx_kernel() { .unwrap(); // check the tx is valid - tx.validate(Weighting::AsTransaction, verifier_cache()) - .unwrap(); + let height = 42; // arbitrary + tx.validate(Weighting::AsTransaction, height).unwrap(); // check the kernel is also itself valid assert_eq!(tx.kernels().len(), 1); let kern = &tx.kernels()[0]; kern.verify().unwrap(); - assert_eq!(kern.features, KernelFeatures::Plain { fee: 2 }); - assert_eq!(2, tx.fee()); + assert_eq!(kern.features, KernelFeatures::Plain { fee: 2.into() }); + assert_eq!(2, tx.fee(height)); } // Proof of concept demonstrating we can build two transactions that share @@ -192,16 +188,16 @@ fn build_two_half_kernels() { let key_id3 = ExtKeychain::derive_key_id(1, 3, 0, 0, 0); // build kernel with associated private excess - let mut kernel = TxKernel::with_features(KernelFeatures::Plain { fee: 2 }); + let mut kernel = TxKernel::with_features(KernelFeatures::Plain { fee: 2.into() }); // Construct the message to be signed. let msg = kernel.msg_to_sign().unwrap(); // Generate a kernel with public excess and associated signature. 
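Transaction validation likewise drops the verifier cache but gains a height argument: with fees now encoded as `FeeFields`, `tx.fee(height)` and `tx.validate(..., height)` need to know the height at which the transaction is evaluated, and these tests pass an arbitrary 42. The new shape:

```rust
use grin_core::core::{transaction, Transaction, Weighting};

// Sketch: validation is now height-aware; the tests use an arbitrary height.
fn check_tx(tx: &Transaction, height: u64) -> Result<(), transaction::Error> {
    tx.validate(Weighting::AsTransaction, height)
}
```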
- let excess = BlindingFactor::rand(); - let skey = excess.secret_key().unwrap(); + let excess = BlindingFactor::rand(keychain.secp()); + let skey = excess.secret_key(keychain.secp()).unwrap(); kernel.excess = keychain.secp().commit(0, skey).unwrap(); - let pubkey = &kernel.excess.to_pubkey().unwrap(); + let pubkey = &kernel.excess.to_pubkey(keychain.secp()).unwrap(); kernel.excess_sig = aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap(); kernel.verify().unwrap(); @@ -224,15 +220,10 @@ fn build_two_half_kernels() { ) .unwrap(); - assert_eq!( - tx1.validate(Weighting::AsTransaction, verifier_cache()), - Ok(()), - ); + let height = 42; // arbitrary + assert_eq!(tx1.validate(Weighting::AsTransaction, height), Ok(()),); - assert_eq!( - tx2.validate(Weighting::AsTransaction, verifier_cache()), - Ok(()), - ); + assert_eq!(tx2.validate(Weighting::AsTransaction, height), Ok(()),); // The transactions share an identical kernel. assert_eq!(tx1.kernels()[0], tx2.kernels()[0]); @@ -256,19 +247,14 @@ fn transaction_cut_through() { let tx1 = tx1i2o(); let tx2 = tx2i1o(); - assert!(tx1 - .validate(Weighting::AsTransaction, verifier_cache()) - .is_ok()); - assert!(tx2 - .validate(Weighting::AsTransaction, verifier_cache()) - .is_ok()); - - let vc = verifier_cache(); + let height = 42; // arbitrary + assert!(tx1.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx2.validate(Weighting::AsTransaction, height).is_ok()); // now build a "cut_through" tx from tx1 and tx2 let tx3 = aggregate(&[tx1, tx2]).unwrap(); - assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok()); + assert!(tx3.validate(Weighting::AsTransaction, height).is_ok()); } // Attempt to deaggregate a multi-kernel transaction in a different way @@ -280,33 +266,30 @@ fn multi_kernel_transaction_deaggregation() { let tx3 = tx1i1o(); let tx4 = tx1i1o(); - let vc = verifier_cache(); - - assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx4.validate(Weighting::AsTransaction, vc.clone()).is_ok()); + let height = 42; // arbitrary + assert!(tx1.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx2.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx3.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx4.validate(Weighting::AsTransaction, height).is_ok()); let tx1234 = aggregate(&[tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]).unwrap(); let tx12 = aggregate(&[tx1, tx2]).unwrap(); let tx34 = aggregate(&[tx3, tx4]).unwrap(); - assert!(tx1234 - .validate(Weighting::AsTransaction, vc.clone()) - .is_ok()); - assert!(tx12.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx34.validate(Weighting::AsTransaction, vc.clone()).is_ok()); + assert!(tx1234.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx12.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx34.validate(Weighting::AsTransaction, height).is_ok()); let deaggregated_tx34 = deaggregate(tx1234.clone(), &[tx12.clone()]).unwrap(); assert!(deaggregated_tx34 - .validate(Weighting::AsTransaction, vc.clone()) + .validate(Weighting::AsTransaction, height) .is_ok()); assert_eq!(tx34, deaggregated_tx34); let deaggregated_tx12 = deaggregate(tx1234, &[tx34]).unwrap(); assert!(deaggregated_tx12 - .validate(Weighting::AsTransaction, vc.clone()) + .validate(Weighting::AsTransaction, height) .is_ok()); 
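The other change in `build_two_half_kernels` above: secp-backed helpers no longer use an implicit context, so `BlindingFactor::rand`, `secret_key` and `to_pubkey` all take an explicit `Secp256k1`. Using the keychain's context, as the test does:

```rust
use keychain::{BlindingFactor, ExtKeychain, Keychain};

// Sketch: the secp context is now passed explicitly to these helpers.
fn random_excess(keychain: &ExtKeychain) -> BlindingFactor {
    let secp = keychain.secp();
    let excess = BlindingFactor::rand(secp);
    // deriving the secret key needs the same context
    let _skey = excess.secret_key(secp).unwrap();
    excess
}
```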
assert_eq!(tx12, deaggregated_tx12); } @@ -318,21 +301,20 @@ fn multi_kernel_transaction_deaggregation_2() { let tx2 = tx1i1o(); let tx3 = tx1i1o(); - let vc = verifier_cache(); - - assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok()); + let height = 42; // arbitrary + assert!(tx1.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx2.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx3.validate(Weighting::AsTransaction, height).is_ok()); let tx123 = aggregate(&[tx1.clone(), tx2.clone(), tx3.clone()]).unwrap(); let tx12 = aggregate(&[tx1, tx2]).unwrap(); - assert!(tx123.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx12.validate(Weighting::AsTransaction, vc.clone()).is_ok()); + assert!(tx123.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx12.validate(Weighting::AsTransaction, height).is_ok()); let deaggregated_tx3 = deaggregate(tx123, &[tx12]).unwrap(); assert!(deaggregated_tx3 - .validate(Weighting::AsTransaction, vc.clone()) + .validate(Weighting::AsTransaction, height) .is_ok()); assert_eq!(tx3, deaggregated_tx3); } @@ -344,22 +326,21 @@ fn multi_kernel_transaction_deaggregation_3() { let tx2 = tx1i1o(); let tx3 = tx1i1o(); - let vc = verifier_cache(); - - assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok()); + let height = 42; // arbitrary + assert!(tx1.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx2.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx3.validate(Weighting::AsTransaction, height).is_ok()); let tx123 = aggregate(&[tx1.clone(), tx2.clone(), tx3.clone()]).unwrap(); let tx13 = aggregate(&[tx1, tx3]).unwrap(); let tx2 = aggregate(&[tx2]).unwrap(); - assert!(tx123.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok()); + assert!(tx123.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx2.validate(Weighting::AsTransaction, height).is_ok()); let deaggregated_tx13 = deaggregate(tx123, &[tx2]).unwrap(); assert!(deaggregated_tx13 - .validate(Weighting::AsTransaction, vc.clone()) + .validate(Weighting::AsTransaction, height) .is_ok()); assert_eq!(tx13, deaggregated_tx13); } @@ -373,13 +354,12 @@ fn multi_kernel_transaction_deaggregation_4() { let tx4 = tx1i1o(); let tx5 = tx1i1o(); - let vc = verifier_cache(); - - assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx4.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx5.validate(Weighting::AsTransaction, vc.clone()).is_ok()); + let height = 42; // arbitrary + assert!(tx1.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx2.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx3.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx4.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx5.validate(Weighting::AsTransaction, height).is_ok()); let tx12345 = aggregate(&[ tx1.clone(), @@ -389,13 +369,11 @@ fn multi_kernel_transaction_deaggregation_4() { tx5.clone(), ]) .unwrap(); - assert!(tx12345 - 
.validate(Weighting::AsTransaction, vc.clone()) - .is_ok()); + assert!(tx12345.validate(Weighting::AsTransaction, height).is_ok()); let deaggregated_tx5 = deaggregate(tx12345, &[tx1, tx2, tx3, tx4]).unwrap(); assert!(deaggregated_tx5 - .validate(Weighting::AsTransaction, vc.clone()) + .validate(Weighting::AsTransaction, height) .is_ok()); assert_eq!(tx5, deaggregated_tx5); } @@ -409,13 +387,12 @@ fn multi_kernel_transaction_deaggregation_5() { let tx4 = tx1i1o(); let tx5 = tx1i1o(); - let vc = verifier_cache(); - - assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx4.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx5.validate(Weighting::AsTransaction, vc.clone()).is_ok()); + let height = 42; // arbitrary + assert!(tx1.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx2.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx3.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx4.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx5.validate(Weighting::AsTransaction, height).is_ok()); let tx12345 = aggregate(&[ tx1.clone(), @@ -428,13 +405,11 @@ fn multi_kernel_transaction_deaggregation_5() { let tx12 = aggregate(&[tx1, tx2]).unwrap(); let tx34 = aggregate(&[tx3, tx4]).unwrap(); - assert!(tx12345 - .validate(Weighting::AsTransaction, vc.clone()) - .is_ok()); + assert!(tx12345.validate(Weighting::AsTransaction, height).is_ok()); let deaggregated_tx5 = deaggregate(tx12345, &[tx12, tx34]).unwrap(); assert!(deaggregated_tx5 - .validate(Weighting::AsTransaction, vc.clone()) + .validate(Weighting::AsTransaction, height) .is_ok()); assert_eq!(tx5, deaggregated_tx5); } @@ -446,27 +421,26 @@ fn basic_transaction_deaggregation() { let tx1 = tx1i2o(); let tx2 = tx2i1o(); - let vc = verifier_cache(); - - assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok()); - assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok()); + let height = 42; // arbitrary + assert!(tx1.validate(Weighting::AsTransaction, height).is_ok()); + assert!(tx2.validate(Weighting::AsTransaction, height).is_ok()); // now build a "cut_through" tx from tx1 and tx2 let tx3 = aggregate(&[tx1.clone(), tx2.clone()]).unwrap(); - assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok()); + assert!(tx3.validate(Weighting::AsTransaction, height).is_ok()); let deaggregated_tx1 = deaggregate(tx3.clone(), &[tx2.clone()]).unwrap(); assert!(deaggregated_tx1 - .validate(Weighting::AsTransaction, vc.clone()) + .validate(Weighting::AsTransaction, height) .is_ok()); assert_eq!(tx1, deaggregated_tx1); let deaggregated_tx2 = deaggregate(tx3, &[tx1]).unwrap(); assert!(deaggregated_tx2 - .validate(Weighting::AsTransaction, vc.clone()) + .validate(Weighting::AsTransaction, height) .is_ok()); assert_eq!(tx2, deaggregated_tx2); } @@ -480,7 +454,7 @@ fn hash_output() { let key_id3 = ExtKeychain::derive_key_id(1, 3, 0, 0, 0); let tx = build::transaction( - KernelFeatures::Plain { fee: 1 }, + KernelFeatures::Plain { fee: 1.into() }, &[input(75, key_id1), output(42, key_id2), output(32, key_id3)], &keychain, &builder, @@ -496,9 +470,8 @@ fn hash_output() { #[test] fn blind_tx() { let btx = tx2i1o(); - assert!(btx - .validate(Weighting::AsTransaction, verifier_cache()) - .is_ok()); + let height = 42; // arbitrary + assert!(btx.validate(Weighting::AsTransaction, height).is_ok()); // Ignored for 
bullet proofs, because calling range_proof_info // with a bullet proof causes painful errors @@ -543,8 +516,9 @@ fn tx_build_exchange() { // Alice builds her transaction, with change, which also produces the sum // of blinding factors before they're obscured. - let tx = Transaction::empty() - .with_kernel(TxKernel::with_features(KernelFeatures::Plain { fee: 2 })); + let tx = Transaction::empty().with_kernel(TxKernel::with_features(KernelFeatures::Plain { + fee: 2.into(), + })); let (tx, sum) = build::partial_transaction(tx, &[in1, in2, output(1, key_id3)], &keychain, &builder) .unwrap(); @@ -556,7 +530,7 @@ fn tx_build_exchange() { // blinding factors. He adds his output, finalizes the transaction so it's // ready for broadcast. let tx_final = build::transaction( - KernelFeatures::Plain { fee: 2 }, + KernelFeatures::Plain { fee: 2.into() }, &[ initial_tx(tx_alice), with_excess(blind_sum), @@ -567,9 +541,8 @@ fn tx_build_exchange() { ) .unwrap(); - tx_final - .validate(Weighting::AsTransaction, verifier_cache()) - .unwrap(); + let height = 42; // arbitrary + tx_final.validate(Weighting::AsTransaction, height).unwrap(); } #[test] @@ -583,8 +556,7 @@ fn reward_empty_block() { let b = new_block(&[], &keychain, &builder, &previous_header, &key_id); - b.validate(&BlindingFactor::zero(), verifier_cache()) - .unwrap(); + b.validate(&BlindingFactor::zero()).unwrap(); } #[test] @@ -594,15 +566,13 @@ fn reward_with_tx_block() { let builder = ProofBuilder::new(&keychain); let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); - let vc = verifier_cache(); - let tx1 = tx2i1o(); - tx1.validate(Weighting::AsTransaction, vc.clone()).unwrap(); - let previous_header = BlockHeader::default(); + tx1.validate(Weighting::AsTransaction, previous_header.height + 1) + .unwrap(); let block = new_block(&[tx1], &keychain, &builder, &previous_header, &key_id); - block.validate(&BlindingFactor::zero(), vc.clone()).unwrap(); + block.validate(&BlindingFactor::zero()).unwrap(); } #[test] @@ -612,15 +582,13 @@ fn simple_block() { let builder = ProofBuilder::new(&keychain); let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); - let vc = verifier_cache(); - let tx1 = tx2i1o(); let tx2 = tx1i1o(); let previous_header = BlockHeader::default(); let b = new_block(&[tx1, tx2], &keychain, &builder, &previous_header, &key_id); - b.validate(&BlindingFactor::zero(), vc.clone()).unwrap(); + b.validate(&BlindingFactor::zero()).unwrap(); } #[test] @@ -632,13 +600,11 @@ fn test_block_with_timelocked_tx() { let key_id2 = ExtKeychain::derive_key_id(1, 2, 0, 0, 0); let key_id3 = ExtKeychain::derive_key_id(1, 3, 0, 0, 0); - let vc = verifier_cache(); - // first check we can add a timelocked tx where lock height matches current // block height and that the resulting block is valid let tx1 = build::transaction( KernelFeatures::HeightLocked { - fee: 2, + fee: 2.into(), lock_height: 1, }, &[input(5, key_id1.clone()), output(3, key_id2.clone())], @@ -656,13 +622,13 @@ fn test_block_with_timelocked_tx() { &previous_header, &key_id3.clone(), ); - b.validate(&BlindingFactor::zero(), vc.clone()).unwrap(); + b.validate(&BlindingFactor::zero()).unwrap(); // now try adding a timelocked tx where lock height is greater than current // block height let tx1 = build::transaction( KernelFeatures::HeightLocked { - fee: 2, + fee: 2.into(), lock_height: 2, }, &[input(5, key_id1), output(3, key_id2)], @@ -674,7 +640,7 @@ fn test_block_with_timelocked_tx() { let previous_header = BlockHeader::default(); let b = new_block(&[tx1], &keychain, &builder, 
&previous_header, &key_id3); - match b.validate(&BlindingFactor::zero(), vc.clone()) { + match b.validate(&BlindingFactor::zero()) { Err(KernelLockHeight(height, _)) => { assert_eq!(height, 2); } @@ -686,14 +652,14 @@ pub fn test_verify_1i1o_sig() { test_setup(); let tx = tx1i1o(); - tx.validate(Weighting::AsTransaction, verifier_cache()) - .unwrap(); + let height = 42; // arbitrary + tx.validate(Weighting::AsTransaction, height).unwrap(); } #[test] pub fn test_verify_2i1o_sig() { test_setup(); let tx = tx2i1o(); - tx.validate(Weighting::AsTransaction, verifier_cache()) - .unwrap(); + let height = 42; // arbitrary + tx.validate(Weighting::AsTransaction, height).unwrap(); } diff --git a/core/tests/merkle_proof.rs b/core/tests/merkle_proof.rs index dfcf3432c7..09a57d80fd 100644 --- a/core/tests/merkle_proof.rs +++ b/core/tests/merkle_proof.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ mod common; use self::core::core::merkle_proof::MerkleProof; -use self::core::core::pmmr::{VecBackend, PMMR}; +use self::core::core::pmmr::{ReadablePMMR, VecBackend, PMMR}; use self::core::ser::{self, PMMRIndexHashable}; use crate::common::TestElem; use grin_core as core; @@ -34,7 +34,7 @@ fn merkle_proof_ser_deser() { for x in 0..15 { pmmr.push(&TestElem([0, 0, 0, x])).unwrap(); } - let proof = pmmr.merkle_proof(9).unwrap(); + let proof = pmmr.merkle_proof(8).unwrap(); let mut vec = Vec::new(); ser::serialize_default(&mut vec, &proof).expect("serialization failed"); @@ -49,12 +49,12 @@ fn pmmr_merkle_proof_prune_and_rewind() { let mut pmmr = PMMR::new(&mut ba); pmmr.push(&TestElem([0, 0, 0, 1])).unwrap(); pmmr.push(&TestElem([0, 0, 0, 2])).unwrap(); - let proof = pmmr.merkle_proof(2).unwrap(); + let proof = pmmr.merkle_proof(1).unwrap(); // now prune an element and check we can still generate // the correct Merkle proof for the other element (after sibling pruned) - pmmr.prune(1).unwrap(); - let proof_2 = pmmr.merkle_proof(2).unwrap(); + pmmr.prune(0).unwrap(); + let proof_2 = pmmr.merkle_proof(1).unwrap(); assert_eq!(proof, proof_2); } @@ -77,113 +77,113 @@ fn pmmr_merkle_proof() { pmmr.push(&elems[0]).unwrap(); let pos_0 = elems[0].hash_with_index(0); - assert_eq!(pmmr.get_hash(1).unwrap(), pos_0); + assert_eq!(pmmr.get_hash(0).unwrap(), pos_0); - let proof = pmmr.merkle_proof(1).unwrap(); + let proof = pmmr.merkle_proof(0).unwrap(); assert_eq!(proof.path, vec![]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 0).is_ok()); pmmr.push(&elems[1]).unwrap(); let pos_1 = elems[1].hash_with_index(1); - assert_eq!(pmmr.get_hash(2).unwrap(), pos_1); + assert_eq!(pmmr.get_hash(1).unwrap(), pos_1); let pos_2 = (pos_0, pos_1).hash_with_index(2); - assert_eq!(pmmr.get_hash(3).unwrap(), pos_2); + assert_eq!(pmmr.get_hash(2).unwrap(), pos_2); assert_eq!(pmmr.root().unwrap(), pos_2); - assert_eq!(pmmr.peaks().collect::<Vec<_>>(), [pos_2]); + assert_eq!(pmmr.peaks(), vec![pos_2]); // single peak, path with single sibling - let proof = pmmr.merkle_proof(1).unwrap(); + let proof = pmmr.merkle_proof(0).unwrap(); assert_eq!(proof.path, vec![pos_1]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 0).is_ok()); - let proof = 
pmmr.merkle_proof(2).unwrap(); + let proof = pmmr.merkle_proof(1).unwrap(); assert_eq!(proof.path, vec![pos_0]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 2).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 1).is_ok()); // three leaves, two peaks (one also the right-most leaf) pmmr.push(&elems[2]).unwrap(); let pos_3 = elems[2].hash_with_index(3); - assert_eq!(pmmr.get_hash(4).unwrap(), pos_3); + assert_eq!(pmmr.get_hash(3).unwrap(), pos_3); assert_eq!(pmmr.root().unwrap(), (pos_2, pos_3).hash_with_index(4)); - assert_eq!(pmmr.peaks().collect::<Vec<_>>(), [pos_2, pos_3]); + assert_eq!(pmmr.peaks(), vec![pos_2, pos_3]); - let proof = pmmr.merkle_proof(1).unwrap(); + let proof = pmmr.merkle_proof(0).unwrap(); assert_eq!(proof.path, vec![pos_1, pos_3]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 0).is_ok()); - let proof = pmmr.merkle_proof(2).unwrap(); + let proof = pmmr.merkle_proof(1).unwrap(); assert_eq!(proof.path, vec![pos_0, pos_3]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 2).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 1).is_ok()); - let proof = pmmr.merkle_proof(4).unwrap(); + let proof = pmmr.merkle_proof(3).unwrap(); assert_eq!(proof.path, vec![pos_2]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[2], 4).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[2], 3).is_ok()); // 7 leaves, 3 peaks, 11 pos in total pmmr.push(&elems[3]).unwrap(); let pos_4 = elems[3].hash_with_index(4); - assert_eq!(pmmr.get_hash(5).unwrap(), pos_4); + assert_eq!(pmmr.get_hash(4).unwrap(), pos_4); let pos_5 = (pos_3, pos_4).hash_with_index(5); - assert_eq!(pmmr.get_hash(6).unwrap(), pos_5); + assert_eq!(pmmr.get_hash(5).unwrap(), pos_5); let pos_6 = (pos_2, pos_5).hash_with_index(6); - assert_eq!(pmmr.get_hash(7).unwrap(), pos_6); + assert_eq!(pmmr.get_hash(6).unwrap(), pos_6); pmmr.push(&elems[4]).unwrap(); let pos_7 = elems[4].hash_with_index(7); - assert_eq!(pmmr.get_hash(8).unwrap(), pos_7); + assert_eq!(pmmr.get_hash(7).unwrap(), pos_7); pmmr.push(&elems[5]).unwrap(); let pos_8 = elems[5].hash_with_index(8); - assert_eq!(pmmr.get_hash(9).unwrap(), pos_8); + assert_eq!(pmmr.get_hash(8).unwrap(), pos_8); let pos_9 = (pos_7, pos_8).hash_with_index(9); - assert_eq!(pmmr.get_hash(10).unwrap(), pos_9); + assert_eq!(pmmr.get_hash(9).unwrap(), pos_9); pmmr.push(&elems[6]).unwrap(); let pos_10 = elems[6].hash_with_index(10); - assert_eq!(pmmr.get_hash(11).unwrap(), pos_10); + assert_eq!(pmmr.get_hash(10).unwrap(), pos_10); assert_eq!(pmmr.unpruned_size(), 11); - let proof = pmmr.merkle_proof(1).unwrap(); + let proof = pmmr.merkle_proof(0).unwrap(); assert_eq!( proof.path, vec![pos_1, pos_5, (pos_9, pos_10).hash_with_index(11)] ); - assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 0).is_ok()); - let proof = pmmr.merkle_proof(2).unwrap(); + let proof = pmmr.merkle_proof(1).unwrap(); assert_eq!( proof.path, vec![pos_0, pos_5, (pos_9, pos_10).hash_with_index(11)] ); - assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 2).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 1).is_ok()); - let proof = pmmr.merkle_proof(4).unwrap(); + let proof = pmmr.merkle_proof(3).unwrap(); assert_eq!( proof.path, vec![pos_4, pos_2, (pos_9, pos_10).hash_with_index(11)] ); - assert!(proof.verify(pmmr.root().unwrap(), 
&elems[2], 3).is_ok()); - let proof = pmmr.merkle_proof(5).unwrap(); + let proof = pmmr.merkle_proof(4).unwrap(); assert_eq!( proof.path, vec![pos_3, pos_2, (pos_9, pos_10).hash_with_index(11)] ); - assert!(proof.verify(pmmr.root().unwrap(), &elems[3], 5).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[3], 4).is_ok()); - let proof = pmmr.merkle_proof(8).unwrap(); + let proof = pmmr.merkle_proof(7).unwrap(); assert_eq!(proof.path, vec![pos_8, pos_10, pos_6]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[4], 8).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[4], 7).is_ok()); - let proof = pmmr.merkle_proof(9).unwrap(); + let proof = pmmr.merkle_proof(8).unwrap(); assert_eq!(proof.path, vec![pos_7, pos_10, pos_6]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[5], 9).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[5], 8).is_ok()); - let proof = pmmr.merkle_proof(11).unwrap(); + let proof = pmmr.merkle_proof(10).unwrap(); assert_eq!(proof.path, vec![pos_9, pos_6]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[6], 11).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[6], 10).is_ok()); } diff --git a/core/tests/pmmr.rs b/core/tests/pmmr.rs index 1e02ea135d..e515bf9909 100644 --- a/core/tests/pmmr.rs +++ b/core/tests/pmmr.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,12 +15,11 @@ mod common; use self::core::core::hash::Hash; -use self::core::core::pmmr::{self, VecBackend, PMMR}; +use self::core::core::pmmr::{self, ReadablePMMR, VecBackend, PMMR}; use self::core::ser::PMMRIndexHashable; use crate::common::TestElem; use chrono::prelude::Utc; use grin_core as core; -use std::u64; #[test] fn some_peak_map() { @@ -44,11 +43,11 @@ fn bench_peak_map() { let increments = vec![1_000_000u64, 10_000_000u64, 100_000_000u64]; for v in increments { - let start = Utc::now().timestamp_nanos(); + let start = Utc::now().timestamp_nanos_opt().unwrap(); for i in 0..v { let _ = pmmr::peak_map_height(i); } - let fin = Utc::now().timestamp_nanos(); + let fin = Utc::now().timestamp_nanos_opt().unwrap(); let dur_ms = (fin - start) as f64 * nano_to_millis; println!("{:9?} peak_map_height() in {:9.3?}ms", v, dur_ms); } } @@ -77,7 +76,7 @@ fn first_100_mmr_heights() { 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 5 \ 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 0 0 1 0 0"; let first_100 = first_100_str.split(' ').map(|n| n.parse::<u64>().unwrap()); - let mut count = 1; + let mut count = 0; for n in first_100 { assert_eq!( n, @@ -90,17 +89,27 @@ } } +#[test] +fn test_bintree_range() { + assert_eq!(pmmr::bintree_range(0), 0..1); + assert_eq!(pmmr::bintree_range(1), 1..2); + assert_eq!(pmmr::bintree_range(2), 0..3); + assert_eq!(pmmr::bintree_range(3), 3..4); + assert_eq!(pmmr::bintree_range(4), 4..5); + assert_eq!(pmmr::bintree_range(5), 3..6); + assert_eq!(pmmr::bintree_range(6), 0..7); +} + // The pos of the rightmost leaf for the provided MMR size (last leaf in subtree). 
#[test] fn test_bintree_rightmost() { assert_eq!(pmmr::bintree_rightmost(0), 0); assert_eq!(pmmr::bintree_rightmost(1), 1); - assert_eq!(pmmr::bintree_rightmost(2), 2); - assert_eq!(pmmr::bintree_rightmost(3), 2); + assert_eq!(pmmr::bintree_rightmost(2), 1); + assert_eq!(pmmr::bintree_rightmost(3), 3); assert_eq!(pmmr::bintree_rightmost(4), 4); - assert_eq!(pmmr::bintree_rightmost(5), 5); - assert_eq!(pmmr::bintree_rightmost(6), 5); - assert_eq!(pmmr::bintree_rightmost(7), 5); + assert_eq!(pmmr::bintree_rightmost(5), 4); + assert_eq!(pmmr::bintree_rightmost(6), 4); } // The pos of the leftmost leaf for the provided MMR size (first leaf in subtree). @@ -108,19 +117,88 @@ fn test_bintree_rightmost() { fn test_bintree_leftmost() { assert_eq!(pmmr::bintree_leftmost(0), 0); assert_eq!(pmmr::bintree_leftmost(1), 1); - assert_eq!(pmmr::bintree_leftmost(2), 2); - assert_eq!(pmmr::bintree_leftmost(3), 1); + assert_eq!(pmmr::bintree_leftmost(2), 0); + assert_eq!(pmmr::bintree_leftmost(3), 3); assert_eq!(pmmr::bintree_leftmost(4), 4); - assert_eq!(pmmr::bintree_leftmost(5), 5); - assert_eq!(pmmr::bintree_leftmost(6), 4); - assert_eq!(pmmr::bintree_leftmost(7), 1); + assert_eq!(pmmr::bintree_leftmost(5), 3); + assert_eq!(pmmr::bintree_leftmost(6), 0); +} + +#[test] +fn test_bintree_leaf_pos_iter() { + assert_eq!(pmmr::bintree_leaf_pos_iter(0).collect::<Vec<_>>(), [0]); + assert_eq!(pmmr::bintree_leaf_pos_iter(1).collect::<Vec<_>>(), [1]); + assert_eq!(pmmr::bintree_leaf_pos_iter(2).collect::<Vec<_>>(), [0, 1]); + assert_eq!(pmmr::bintree_leaf_pos_iter(3).collect::<Vec<_>>(), [3]); + assert_eq!(pmmr::bintree_leaf_pos_iter(4).collect::<Vec<_>>(), [4]); + assert_eq!(pmmr::bintree_leaf_pos_iter(5).collect::<Vec<_>>(), [3, 4]); + assert_eq!( + pmmr::bintree_leaf_pos_iter(6).collect::<Vec<_>>(), + [0, 1, 3, 4] + ); +} + +#[test] +fn test_bintree_pos_iter() { + assert_eq!(pmmr::bintree_pos_iter(0).collect::<Vec<_>>(), [0]); + assert_eq!(pmmr::bintree_pos_iter(1).collect::<Vec<_>>(), [1]); + assert_eq!(pmmr::bintree_pos_iter(2).collect::<Vec<_>>(), [0, 1, 2]); + assert_eq!(pmmr::bintree_pos_iter(3).collect::<Vec<_>>(), [3]); + assert_eq!(pmmr::bintree_pos_iter(4).collect::<Vec<_>>(), [4]); + assert_eq!(pmmr::bintree_pos_iter(5).collect::<Vec<_>>(), [3, 4, 5]); + assert_eq!( + pmmr::bintree_pos_iter(6).collect::<Vec<_>>(), + [0, 1, 2, 3, 4, 5, 6] + ); +} + +#[test] +fn test_is_leaf() { + assert_eq!(pmmr::is_leaf(0), true); + assert_eq!(pmmr::is_leaf(1), true); + assert_eq!(pmmr::is_leaf(2), false); + assert_eq!(pmmr::is_leaf(3), true); + assert_eq!(pmmr::is_leaf(4), true); + assert_eq!(pmmr::is_leaf(5), false); + assert_eq!(pmmr::is_leaf(6), false); +} + +#[test] +fn test_pmmr_leaf_to_insertion_index() { + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(0), Some(0)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(1), Some(1)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(3), Some(2)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(4), Some(3)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(7), Some(4)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(8), Some(5)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(10), Some(6)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(11), Some(7)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(15), Some(8)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(16), Some(9)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(18), Some(10)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(19), Some(11)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(22), Some(12)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(23), Some(13)); + 
assert_eq!(pmmr::pmmr_leaf_to_insertion_index(25), Some(14)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(26), Some(15)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(31), Some(16)); + + // Not a leaf node + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(30), None); + + // Sanity check to make sure we don't get an explosion around the u64 max + // number of leaves + let n_leaves_max_u64 = pmmr::n_leaves(u64::MAX - 257); + assert_eq!( + pmmr::pmmr_leaf_to_insertion_index(n_leaves_max_u64), + Some(4611686018427387884) + ); } #[test] fn test_n_leaves() { // make sure we handle an empty MMR correctly assert_eq!(pmmr::n_leaves(0), 0); - // and various sizes on non-empty MMRs assert_eq!(pmmr::n_leaves(1), 1); assert_eq!(pmmr::n_leaves(2), 2); @@ -134,57 +212,65 @@ fn test_n_leaves() { assert_eq!(pmmr::n_leaves(10), 6); } -/// Find parent and sibling positions for various node positions. #[test] -fn various_families() { - // 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 - assert_eq!(pmmr::family(1), (3, 2)); - assert_eq!(pmmr::family(2), (3, 1)); - assert_eq!(pmmr::family(3), (7, 6)); - assert_eq!(pmmr::family(4), (6, 5)); - assert_eq!(pmmr::family(5), (6, 4)); - assert_eq!(pmmr::family(6), (7, 3)); - assert_eq!(pmmr::family(7), (15, 14)); - assert_eq!(pmmr::family(1_000), (1_001, 997)); +fn test_round_up_to_leaf_pos() { + assert_eq!(pmmr::round_up_to_leaf_pos(0), 0); + assert_eq!(pmmr::round_up_to_leaf_pos(1), 1); + assert_eq!(pmmr::round_up_to_leaf_pos(2), 3); + assert_eq!(pmmr::round_up_to_leaf_pos(3), 3); + assert_eq!(pmmr::round_up_to_leaf_pos(4), 4); + assert_eq!(pmmr::round_up_to_leaf_pos(5), 7); + assert_eq!(pmmr::round_up_to_leaf_pos(6), 7); + assert_eq!(pmmr::round_up_to_leaf_pos(7), 7); + assert_eq!(pmmr::round_up_to_leaf_pos(8), 8); + assert_eq!(pmmr::round_up_to_leaf_pos(9), 10); + assert_eq!(pmmr::round_up_to_leaf_pos(10), 10); } +/// Find parent and sibling positions for various node positions. 
#[test] -fn test_paths() { - assert_eq!(pmmr::path(1, 3).collect::<Vec<_>>(), [1, 3]); - assert_eq!(pmmr::path(2, 3).collect::<Vec<_>>(), [2, 3]); - assert_eq!(pmmr::path(4, 16).collect::<Vec<_>>(), [4, 6, 7, 15]); +fn various_families() { + // 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 + assert_eq!(pmmr::family(0), (2, 1)); + assert_eq!(pmmr::family(1), (2, 0)); + assert_eq!(pmmr::family(2), (6, 5)); + assert_eq!(pmmr::family(3), (5, 4)); + assert_eq!(pmmr::family(4), (5, 3)); + assert_eq!(pmmr::family(5), (6, 2)); + assert_eq!(pmmr::family(6), (14, 13)); + assert_eq!(pmmr::family(999), (1_000, 996)); } #[test] fn test_is_left_sibling() { - assert_eq!(pmmr::is_left_sibling(1), true); - assert_eq!(pmmr::is_left_sibling(2), false); - assert_eq!(pmmr::is_left_sibling(3), true); + assert_eq!(pmmr::is_left_sibling(0), true); + assert_eq!(pmmr::is_left_sibling(1), false); + assert_eq!(pmmr::is_left_sibling(2), true); } #[test] fn various_branches() { // the two leaf nodes in a 3 node tree (height 1) - assert_eq!(pmmr::family_branch(1, 3), [(3, 2)]); - assert_eq!(pmmr::family_branch(2, 3), [(3, 1)]); + assert_eq!(pmmr::family_branch(0, 3), [(2, 1)]); + assert_eq!(pmmr::family_branch(1, 3), [(2, 0)]); // the root node in a 3 node tree - assert_eq!(pmmr::family_branch(3, 3), []); + assert_eq!(pmmr::family_branch(2, 3), []); // leaf node in a larger tree of 7 nodes (height 2) - assert_eq!(pmmr::family_branch(1, 7), [(3, 2), (7, 6)]); + assert_eq!(pmmr::family_branch(0, 7), [(2, 1), (6, 5)]); // note these only go as far up as the local peak, not necessarily the single // root - assert_eq!(pmmr::family_branch(1, 4), [(3, 2)]); + assert_eq!(pmmr::family_branch(0, 4), [(2, 1)]); // pos 4 in a tree of size 4 is a local peak - assert_eq!(pmmr::family_branch(4, 4), []); + assert_eq!(pmmr::family_branch(3, 4), []); // pos 4 in a tree of size 5 is also still a local peak - assert_eq!(pmmr::family_branch(4, 5), []); + assert_eq!(pmmr::family_branch(3, 5), []); // pos 4 in a tree of size 6 has a parent and a sibling - assert_eq!(pmmr::family_branch(4, 6), [(6, 5)]); + assert_eq!(pmmr::family_branch(3, 6), [(5, 4)]); // a tree of size 7 is all under a single root - assert_eq!(pmmr::family_branch(4, 7), [(6, 5), (7, 3)]); + assert_eq!(pmmr::family_branch(3, 7), [(5, 4), (6, 2)]); // ok now for a more realistic one, a tree with over a million nodes in it // find the "family path" back up the tree from a leaf node at 0 @@ -193,27 +279,27 @@ fn various_branches() { // largest possible list of peaks before we start combining them into larger // peaks. 
assert_eq!( - pmmr::family_branch(1, 1_049_000), + pmmr::family_branch(0, 1_049_000), [ - (3, 2), - (7, 6), - (15, 14), - (31, 30), - (63, 62), - (127, 126), - (255, 254), - (511, 510), - (1023, 1022), - (2047, 2046), - (4095, 4094), - (8191, 8190), - (16383, 16382), - (32767, 32766), - (65535, 65534), - (131071, 131070), - (262143, 262142), - (524287, 524286), - (1048575, 1048574), + (2, 1), + (6, 5), + (14, 13), + (30, 29), + (62, 61), + (126, 125), + (254, 253), + (510, 509), + (1022, 1021), + (2046, 2045), + (4094, 4093), + (8190, 8189), + (16382, 16381), + (32766, 32765), + (65534, 65533), + (131070, 131069), + (262142, 262141), + (524286, 524285), + (1048574, 1048573), ] ); } @@ -228,21 +314,21 @@ fn some_peaks() { assert_eq!(pmmr::peaks(0), empty); // and various non-empty MMRs - assert_eq!(pmmr::peaks(1), [1]); + assert_eq!(pmmr::peaks(1), [0]); assert_eq!(pmmr::peaks(2), empty); - assert_eq!(pmmr::peaks(3), [3]); - assert_eq!(pmmr::peaks(4), [3, 4]); + assert_eq!(pmmr::peaks(3), [2]); + assert_eq!(pmmr::peaks(4), [2, 3]); assert_eq!(pmmr::peaks(5), empty); assert_eq!(pmmr::peaks(6), empty); - assert_eq!(pmmr::peaks(7), [7]); - assert_eq!(pmmr::peaks(8), [7, 8]); + assert_eq!(pmmr::peaks(7), [6]); + assert_eq!(pmmr::peaks(8), [6, 7]); assert_eq!(pmmr::peaks(9), empty); - assert_eq!(pmmr::peaks(10), [7, 10]); - assert_eq!(pmmr::peaks(11), [7, 10, 11]); - assert_eq!(pmmr::peaks(22), [15, 22]); - assert_eq!(pmmr::peaks(32), [31, 32]); - assert_eq!(pmmr::peaks(35), [31, 34, 35]); - assert_eq!(pmmr::peaks(42), [31, 38, 41, 42]); + assert_eq!(pmmr::peaks(10), [6, 9]); + assert_eq!(pmmr::peaks(11), [6, 9, 10]); + assert_eq!(pmmr::peaks(22), [14, 21]); + assert_eq!(pmmr::peaks(32), [30, 31]); + assert_eq!(pmmr::peaks(35), [30, 33, 34]); + assert_eq!(pmmr::peaks(42), [30, 37, 40, 41]); // large realistic example with almost 1.5 million nodes // note the distance between peaks decreases toward the right (trees get @@ -250,8 +336,8 @@ fn some_peaks() { assert_eq!( pmmr::peaks(1048555), [ - 524287, 786430, 917501, 983036, 1015803, 1032186, 1040377, 1044472, 1046519, 1047542, - 1048053, 1048308, 1048435, 1048498, 1048529, 1048544, 1048551, 1048554, 1048555, + 524286, 786429, 917500, 983035, 1015802, 1032185, 1040376, 1044471, 1046518, 1047541, + 1048052, 1048307, 1048434, 1048497, 1048528, 1048543, 1048550, 1048553, 1048554, ], ); } @@ -278,7 +364,7 @@ fn pmmr_push_root() { pmmr.push(&elems[0]).unwrap(); pmmr.dump(false); let pos_0 = elems[0].hash_with_index(0); - assert_eq!(pmmr.peaks().collect::<Vec<_>>(), vec![pos_0]); + assert_eq!(pmmr.peaks(), vec![pos_0]); assert_eq!(pmmr.root().unwrap(), pos_0); assert_eq!(pmmr.unpruned_size(), 1); @@ -287,7 +373,7 @@ pmmr.dump(false); let pos_1 = elems[1].hash_with_index(1); let pos_2 = (pos_0, pos_1).hash_with_index(2); - assert_eq!(pmmr.peaks().collect::<Vec<_>>(), vec![pos_2]); + assert_eq!(pmmr.peaks(), vec![pos_2]); assert_eq!(pmmr.root().unwrap(), pos_2); assert_eq!(pmmr.unpruned_size(), 3); @@ -295,7 +381,7 @@ pmmr.push(&elems[2]).unwrap(); pmmr.dump(false); let pos_3 = elems[2].hash_with_index(3); - assert_eq!(pmmr.peaks().collect::<Vec<_>>(), vec![pos_2, pos_3]); + assert_eq!(pmmr.peaks(), vec![pos_2, pos_3]); assert_eq!(pmmr.root().unwrap(), (pos_2, pos_3).hash_with_index(4)); assert_eq!(pmmr.unpruned_size(), 4); @@ -305,7 +391,7 @@ let pos_4 = elems[3].hash_with_index(4); let pos_5 = (pos_3, pos_4).hash_with_index(5); let pos_6 = (pos_2, pos_5).hash_with_index(6); - 
assert_eq!(pmmr.peaks().collect::<Vec<_>>(), vec![pos_6]); + assert_eq!(pmmr.peaks(), vec![pos_6]); assert_eq!(pmmr.root().unwrap(), pos_6); assert_eq!(pmmr.unpruned_size(), 7); @@ -313,7 +399,7 @@ pmmr.push(&elems[4]).unwrap(); pmmr.dump(false); let pos_7 = elems[4].hash_with_index(7); - assert_eq!(pmmr.peaks().collect::<Vec<_>>(), vec![pos_6, pos_7]); + assert_eq!(pmmr.peaks(), vec![pos_6, pos_7]); assert_eq!(pmmr.root().unwrap(), (pos_6, pos_7).hash_with_index(8)); assert_eq!(pmmr.unpruned_size(), 8); @@ -321,14 +407,14 @@ pmmr.push(&elems[5]).unwrap(); let pos_8 = elems[5].hash_with_index(8); let pos_9 = (pos_7, pos_8).hash_with_index(9); - assert_eq!(pmmr.peaks().collect::<Vec<_>>(), vec![pos_6, pos_9]); + assert_eq!(pmmr.peaks(), vec![pos_6, pos_9]); assert_eq!(pmmr.root().unwrap(), (pos_6, pos_9).hash_with_index(10)); assert_eq!(pmmr.unpruned_size(), 10); // seven elements pmmr.push(&elems[6]).unwrap(); let pos_10 = elems[6].hash_with_index(10); - assert_eq!(pmmr.peaks().collect::<Vec<_>>(), vec![pos_6, pos_9, pos_10]); + assert_eq!(pmmr.peaks(), vec![pos_6, pos_9, pos_10]); assert_eq!( pmmr.root().unwrap(), (pos_6, (pos_9, pos_10).hash_with_index(11)).hash_with_index(11) @@ -342,14 +428,14 @@ let pos_12 = (pos_10, pos_11).hash_with_index(12); let pos_13 = (pos_9, pos_12).hash_with_index(13); let pos_14 = (pos_6, pos_13).hash_with_index(14); - assert_eq!(pmmr.peaks().collect::<Vec<_>>(), vec![pos_14]); + assert_eq!(pmmr.peaks(), vec![pos_14]); assert_eq!(pmmr.root().unwrap(), pos_14); assert_eq!(pmmr.unpruned_size(), 15); // nine elements pmmr.push(&elems[8]).unwrap(); let pos_15 = elems[8].hash_with_index(15); - assert_eq!(pmmr.peaks().collect::<Vec<_>>(), vec![pos_14, pos_15]); + assert_eq!(pmmr.peaks(), vec![pos_14, pos_15]); assert_eq!(pmmr.root().unwrap(), (pos_14, pos_15).hash_with_index(16)); assert_eq!(pmmr.unpruned_size(), 16); } @@ -437,7 +523,7 @@ fn pmmr_prune() { // pruning a leaf with no parent should do nothing { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); - pmmr.prune(16).unwrap(); + pmmr.prune(15).unwrap(); assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); @@ -446,7 +532,7 @@ // pruning leaves with no shared parent just removes 1 element { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); - pmmr.prune(2).unwrap(); + pmmr.prune(1).unwrap(); assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); @@ -454,7 +540,7 @@ { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); - pmmr.prune(4).unwrap(); + pmmr.prune(3).unwrap(); assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); @@ -463,7 +549,7 @@ // pruning a non-leaf node has no effect { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); - pmmr.prune(3).unwrap_err(); + pmmr.prune(2).unwrap_err(); assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); @@ -472,7 +558,7 @@ // TODO - no longer true (leaves only now) - pruning sibling removes subtree { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); - pmmr.prune(5).unwrap(); + pmmr.prune(4).unwrap(); assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); @@ -482,7 +568,7 @@ // removes all subtree { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); - pmmr.prune(1).unwrap(); + pmmr.prune(0).unwrap(); assert_eq!(orig_root, pmmr.root().unwrap()); } 
assert_eq!(ba.hashes.len(), 16); @@ -491,7 +577,7 @@ fn pmmr_prune() { // pruning everything should only leave us with a single peak { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); - for n in 1..16 { + for n in 0..15 { let _ = pmmr.prune(n); } assert_eq!(orig_root, pmmr.root().unwrap()); @@ -502,14 +588,14 @@ fn pmmr_prune() { #[test] fn check_insertion_to_pmmr_index() { + assert_eq!(pmmr::insertion_to_pmmr_index(0), 0); assert_eq!(pmmr::insertion_to_pmmr_index(1), 1); - assert_eq!(pmmr::insertion_to_pmmr_index(2), 2); + assert_eq!(pmmr::insertion_to_pmmr_index(2), 3); assert_eq!(pmmr::insertion_to_pmmr_index(3), 4); - assert_eq!(pmmr::insertion_to_pmmr_index(4), 5); + assert_eq!(pmmr::insertion_to_pmmr_index(4), 7); assert_eq!(pmmr::insertion_to_pmmr_index(5), 8); - assert_eq!(pmmr::insertion_to_pmmr_index(6), 9); + assert_eq!(pmmr::insertion_to_pmmr_index(6), 10); assert_eq!(pmmr::insertion_to_pmmr_index(7), 11); - assert_eq!(pmmr::insertion_to_pmmr_index(8), 12); } #[test] @@ -547,8 +633,8 @@ fn check_elements_from_pmmr_index() { assert_eq!(res.1[6].0[3], 11); // pruning a few nodes should get consistent results - pmmr.prune(pmmr::insertion_to_pmmr_index(5)).unwrap(); - pmmr.prune(pmmr::insertion_to_pmmr_index(20)).unwrap(); + pmmr.prune(pmmr::insertion_to_pmmr_index(4)).unwrap(); + pmmr.prune(pmmr::insertion_to_pmmr_index(19)).unwrap(); let res = pmmr .readonly_pmmr() diff --git a/core/tests/segment.rs b/core/tests/segment.rs new file mode 100644 index 0000000000..cb81402fac --- /dev/null +++ b/core/tests/segment.rs @@ -0,0 +1,61 @@ +// Copyright 2021 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
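// Editor's note: a minimal sketch, not part of this patch, making explicit the indexing
// convention the rewritten pmmr tests above encode: MMR node positions move from 1-based
// to 0-based, so every expected position simply shifts down by one. The helper name is
// hypothetical and exists only to illustrate the translation.
fn to_zero_based(one_based_pos: u64) -> u64 {
	one_based_pos - 1
}

#[test]
fn zero_based_translation_examples() {
	// e.g. peaks(4) changes from [3, 4] to [2, 3] in the diff above...
	assert_eq!(to_zero_based(3), 2);
	assert_eq!(to_zero_based(4), 3);
	// ...and family(1) == (3, 2) becomes family(0) == (2, 1).
	assert_eq!((to_zero_based(3), to_zero_based(2)), (2, 1));
}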
+ +mod common; + +use self::core::core::pmmr; +use self::core::core::{Segment, SegmentIdentifier}; +use common::TestElem; +use grin_core as core; +use grin_core::core::pmmr::ReadablePMMR; + +fn test_unprunable_size(height: u8, n_leaves: u32) { + let size = 1u64 << height; + let n_segments = (n_leaves as u64 + size - 1) / size; + + // Build an MMR with n_leaves leaves + let mut ba = pmmr::VecBackend::new(); + let mut mmr = pmmr::PMMR::new(&mut ba); + for i in 0..n_leaves { + mmr.push(&TestElem([i / 7, i / 5, i / 3, i])).unwrap(); + } + let mmr = mmr.readonly_pmmr(); + let last_pos = mmr.unpruned_size(); + let root = mmr.root().unwrap(); + + for idx in 0..n_segments { + let id = SegmentIdentifier { height, idx }; + let segment = Segment::from_pmmr(id, &mmr, false).unwrap(); + println!( + "\n\n>>>>>>> N_LEAVES = {}, LAST_POS = {}, SEGMENT = {}:\n{:#?}", + n_leaves, last_pos, idx, segment + ); + if idx < n_segments - 1 || (n_leaves as u64) % size == 0 { + // Check if the reconstructed subtree root matches with the hash stored in the mmr + let subtree_root = segment.root(last_pos, None).unwrap().unwrap(); + let last = pmmr::insertion_to_pmmr_index((idx + 1) * size - 1) + (height as u64); + assert_eq!(subtree_root, mmr.get_hash(last).unwrap()); + println!(" ROOT OK"); + } + segment.validate(last_pos, None, root).unwrap(); + println!(" PROOF OK"); + } +} + +#[test] +fn unprunable_mmr() { + for i in 1..=64 { + test_unprunable_size(3, i); + } +} diff --git a/core/tests/transaction.rs b/core/tests/transaction.rs index 877d5a02a9..c649be0802 100644 --- a/core/tests/transaction.rs +++ b/core/tests/transaction.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,16 +17,15 @@ pub mod common; use crate::common::tx1i10_v2_compatible; use crate::core::core::transaction::{self, Error}; -use crate::core::core::verifier_cache::LruVerifierCache; -use crate::core::core::{KernelFeatures, Output, OutputFeatures, Transaction, Weighting}; +use crate::core::core::{ + FeeFields, KernelFeatures, Output, OutputFeatures, Transaction, TxKernel, Weighting, +}; use crate::core::global; -use crate::core::libtx::build; use crate::core::libtx::proof::{self, ProofBuilder}; +use crate::core::libtx::{build, tx_fee}; use crate::core::{consensus, ser}; use grin_core as core; use keychain::{ExtKeychain, Keychain}; -use std::sync::Arc; -use util::RwLock; // We use json serialization between wallet->node when pushing transactions to the network. // This test ensures we exercise this serialization/deserialization code. @@ -104,7 +103,9 @@ fn test_verify_cut_through_plain() -> Result<(), Error> { let builder = proof::ProofBuilder::new(&keychain); let mut tx = build::transaction( - KernelFeatures::Plain { fee: 0 }, + KernelFeatures::Plain { + fee: FeeFields::zero(), + }, &[ build::input(10, key_id1.clone()), build::input(10, key_id2.clone()), @@ -117,11 +118,10 @@ fn test_verify_cut_through_plain() -> Result<(), Error> { ) .expect("valid tx"); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); - // Transaction should fail validation due to cut-through. 
+ let height = 42; // arbitrary assert_eq!( - tx.validate(Weighting::AsTransaction, verifier_cache.clone()), + tx.validate(Weighting::AsTransaction, height), Err(Error::CutThrough), ); @@ -139,7 +139,7 @@ fn test_verify_cut_through_plain() -> Result<(), Error> { .replace_outputs(outputs); // Transaction validates successfully after applying cut-through. - tx.validate(Weighting::AsTransaction, verifier_cache.clone())?; + tx.validate(Weighting::AsTransaction, height)?; // Transaction validates via lightweight "read" validation as well. tx.validate_read()?; @@ -163,7 +163,9 @@ fn test_verify_cut_through_coinbase() -> Result<(), Error> { let builder = ProofBuilder::new(&keychain); let mut tx = build::transaction( - KernelFeatures::Plain { fee: 0 }, + KernelFeatures::Plain { + fee: FeeFields::zero(), + }, &[ build::coinbase_input(consensus::MWC_FIRST_GROUP_REWARD, key_id1.clone()), build::coinbase_input(consensus::MWC_FIRST_GROUP_REWARD, key_id2.clone()), @@ -179,11 +181,10 @@ fn test_verify_cut_through_coinbase() -> Result<(), Error> { ) .expect("valid tx"); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); - // Transaction should fail validation due to cut-through. + let height = 42; // arbitrary assert_eq!( - tx.validate(Weighting::AsTransaction, verifier_cache.clone()), + tx.validate(Weighting::AsTransaction, height), Err(Error::CutThrough), ); @@ -201,10 +202,65 @@ fn test_verify_cut_through_coinbase() -> Result<(), Error> { .replace_outputs(outputs); // Transaction validates successfully after applying cut-through. - tx.validate(Weighting::AsTransaction, verifier_cache.clone())?; + tx.validate(Weighting::AsTransaction, height)?; // Transaction validates via lightweight "read" validation as well. tx.validate_read()?; Ok(()) } + +// Test coverage for FeeFields +#[test] +fn test_fee_fields() -> Result<(), Error> { + global::set_local_chain_type(global::ChainTypes::UserTesting); + global::set_local_accept_fee_base(500_000); + + let keychain = ExtKeychain::from_random_seed(false)?; + + let key_id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); + + let builder = ProofBuilder::new(&keychain); + + let mut tx = build::transaction( + KernelFeatures::Plain { + fee: FeeFields::new(1, 42).unwrap(), + }, + &[ + build::coinbase_input(consensus::calc_mwc_block_reward(1), key_id1.clone()), + build::output(60_000_000_000 - 84 - 42 - 21, key_id1.clone()), + ], + &keychain, + &builder, + ) + .expect("valid tx"); + + let hf2_height = 2 * consensus::TESTING_HARD_FORK_INTERVAL; + assert_eq!( + tx.accept_fee(hf2_height), + (1 * 1 + 1 * 21 + 1 * 3) * 500_000 + ); + assert_eq!(tx.fee(hf2_height), 42); + assert_eq!(tx.fee(hf2_height), 42); + assert_eq!(tx.shifted_fee(hf2_height), 21); + assert_eq!( + tx.accept_fee(hf2_height - 1), + (1 * 4 + 1 * 1 - 1 * 1) * 1_000_000 + ); + assert_eq!(tx.fee(hf2_height - 1), 42 + (1u64 << 40)); + assert_eq!(tx.shifted_fee(hf2_height - 1), 42 + (1u64 << 40)); + + tx.body.kernels.append(&mut vec![ + TxKernel::with_features(KernelFeatures::Plain { + fee: FeeFields::new(2, 84).unwrap(), + }), + TxKernel::with_features(KernelFeatures::Plain { fee: 21.into() }), + ]); + + assert_eq!(tx.fee(hf2_height), 147); + assert_eq!(tx.shifted_fee(hf2_height), 36); + assert_eq!(tx.aggregate_fee_fields(hf2_height), FeeFields::new(2, 147)); + assert_eq!(tx_fee(1, 1, 3), 15_500_000); + + Ok(()) +} diff --git a/core/tests/vec_backend.rs b/core/tests/vec_backend.rs index d0d313cd95..e9b5808d03 100644 --- a/core/tests/vec_backend.rs +++ b/core/tests/vec_backend.rs @@ -1,4 +1,4 @@ -// 
Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ mod common; -use self::core::core::pmmr::{VecBackend, PMMR}; +use self::core::core::pmmr::{ReadablePMMR, VecBackend, PMMR}; use crate::common::TestElem; use grin_core as core; @@ -37,7 +37,7 @@ fn leaf_pos_and_idx_iter_test() { pmmr.leaf_idx_iter(0).collect::<Vec<_>>() ); assert_eq!( - vec![1, 2, 4, 5, 8], + vec![0, 1, 3, 4, 7], pmmr.leaf_pos_iter().collect::<Vec<_>>() ); } @@ -61,7 +61,7 @@ fn leaf_pos_and_idx_iter_hash_only_test() { pmmr.leaf_idx_iter(0).collect::<Vec<_>>() ); assert_eq!( - vec![1, 2, 4, 5, 8], + vec![0, 1, 3, 4, 7], pmmr.leaf_pos_iter().collect::<Vec<_>>() ); } diff --git a/core/tests/verifier_cache.rs b/core/tests/verifier_cache.rs deleted file mode 100644 index 92bc3763fc..0000000000 --- a/core/tests/verifier_cache.rs +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2020 The Grin Developers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod common; - -use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache}; -use self::core::core::{Output, OutputFeatures}; -use self::core::libtx::proof; -use grin_core as core; -use keychain::{ExtKeychain, Keychain, SwitchCommitmentType}; -use std::sync::Arc; -use util::RwLock; - -fn verifier_cache() -> Arc<RwLock<dyn VerifierCache>> { - Arc::new(RwLock::new(LruVerifierCache::new())) -} - -#[test] -fn test_verifier_cache_rangeproofs() { - let cache = verifier_cache(); - - let keychain = ExtKeychain::from_random_seed(false).unwrap(); - let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); - let switch = SwitchCommitmentType::Regular; - let commit = keychain.commit(5, &key_id, switch).unwrap(); - let builder = proof::ProofBuilder::new(&keychain); - let proof = proof::create(&keychain, &builder, 5, &key_id, switch, commit, None).unwrap(); - - let out = Output::new(OutputFeatures::Plain, commit, proof); - - // Check our output is not verified according to the cache. - { - let mut cache = cache.write(); - let unverified = cache.filter_rangeproof_unverified(&[out]); - assert_eq!(unverified, vec![out]); - } - - // Add our output to the cache. - { - let mut cache = cache.write(); - cache.add_rangeproof_verified(vec![out]); - } - - // Check it shows as verified according to the cache. - { - let mut cache = cache.write(); - let unverified = cache.filter_rangeproof_unverified(&[out]); - assert_eq!(unverified, vec![]); - } -} diff --git a/doc/intro.md b/doc/intro.md index caf56e83d8..c12469bd1c 100644 --- a/doc/intro.md +++ b/doc/intro.md @@ -26,7 +26,7 @@ The main goal and characteristics of the MWC project are: A detailed post on the step-by-step of how MWC transactions work (with graphics) can be found [in this Medium post](https://medium.com/@brandonarvanaghi/grin-transactions-explained-step-by-step-fdceb905a853). 
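// Editor's note on the verifier_cache.rs deletion above: a hedged sketch, not part of this
// patch, of the calling convention that replaces the cache. Validation no longer threads an
// Arc<RwLock<dyn VerifierCache>> through every call; callers instead pass the block height at
// which consensus rules apply. Bindings are illustrative; the calls mirror the test diffs
// earlier in this patch.
let height = 42; // any height works for unit-test purposes
tx.validate(Weighting::AsTransaction, height)?; // transactions now take a height
block.validate(&BlindingFactor::zero())?; // blocks drop the cache argument entirely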
-## Tongue Tying for Everyone +## Tongue-Tying for Everyone This document is targeted at readers with a good understanding of blockchains and basic cryptography. With that in mind, we attempt @@ -292,7 +292,7 @@ Recall that a transaction consists of the following: * kernel excess (the public key corresponding to the excess value) * transaction signature signed by the excess value (and verifies with the kernel excess) -(the presentation above did not explicitly include the kernel excess in the transaction, because it can be computed from the inputs and outputs. This paragrpah shows the benefit in including it, for aggregation within block construction.) +(The presentation above did not explicitly include the kernel excess in the transaction, because it can be computed from the inputs and outputs. This paragraph shows the benefit in including it, for aggregation within block construction.) We can say the following is true for any valid transaction (ignoring fees for simplicity): diff --git a/etc/gen_gen/src/bin/gen_gen.rs b/etc/gen_gen/src/bin/gen_gen.rs index 324891166f..40f79d6b9b 100644 --- a/etc/gen_gen/src/bin/gen_gen.rs +++ b/etc/gen_gen/src/bin/gen_gen.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -33,7 +33,6 @@ use grin_util::{self as util, ToHex}; use grin_wallet as wallet; use grin_core::core::hash::Hashed; -use grin_core::core::verifier_cache::LruVerifierCache; use grin_keychain::{BlindingFactor, ExtKeychain, Keychain}; static BCHAIN_INFO_URL: &str = "https://blockchain.info/latestblock"; @@ -141,11 +140,7 @@ fn main() { assert!(gen.header.pow.is_secondary(), "Not a secondary header"); println!("Built genesis:\n{:?}", gen); core::pow::verify_size(&gen.header).unwrap(); - gen.validate( - &BlindingFactor::zero(), - Arc::new(util::RwLock::new(LruVerifierCache::new())), - ) - .unwrap(); + gen.validate(&BlindingFactor::zero()).unwrap(); println!("\nFinal genesis cyclehash: {}", gen.hash().to_hex()); let gen_bin = core::ser::ser_vec(&gen).unwrap(); @@ -269,9 +264,6 @@ fn update_genesis_rs(gen: &core::core::Block) { fn setup_chain(dir_name: &str, genesis: core::core::Block) -> chain::Chain { util::init_test_logger(); let _ = fs::remove_dir_all(dir_name); - let verifier_cache = Arc::new(util::RwLock::new( - core::core::verifier_cache::LruVerifierCache::new(), - )); let db_env = Arc::new(store::new_env(dir_name.to_string())); chain::Chain::init( dir_name.to_string(), @@ -279,7 +271,6 @@ fn setup_chain(dir_name: &str, genesis: core::core::Block) -> chain::Chain { Arc::new(chain::types::NoopAdapter {}), genesis, core::pow::verify_size, - verifier_cache, false, Arc::new(util::StopState::new()), ) diff --git a/keychain/Cargo.toml b/keychain/Cargo.toml index aa5071cbbe..2e1e12c8d6 100644 --- a/keychain/Cargo.toml +++ b/keychain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grin_keychain" -version = "4.4.2" +version = "5.3.2" authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"] description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format." 
license = "Apache-2.0" @@ -19,8 +19,7 @@ serde_derive = "1" serde_json = "1" lazy_static = "1" zeroize = { version = "1.1", features =["zeroize_derive"] } -failure = "0.1" -failure_derive = "0.1" +thiserror = "1" digest = "0.9" hmac = "0.11" @@ -28,4 +27,4 @@ ripemd160 = "0.9" sha2 = "0.9" pbkdf2 = "0.8" -grin_util = { path = "../util", version = "4.4.2" } +grin_util = { path = "../util", version = "5.3.2" } diff --git a/keychain/src/base58.rs b/keychain/src/base58.rs index a5d1221e59..02689e600f 100644 --- a/keychain/src/base58.rs +++ b/keychain/src/base58.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -29,7 +29,6 @@ //! Base58 encoder and decoder use digest::Digest; -use failure::Fail; use sha2::Sha256; use std::{fmt, str}; @@ -53,30 +52,27 @@ pub fn into_le_low_u32(data: &[u8; 32]) -> u32 { } /// An error that might occur during base58 decoding -#[derive(Fail, Debug, PartialEq, Eq, Clone)] +#[derive(thiserror::Error, Debug, PartialEq, Eq, Clone)] pub enum Error { /// Invalid character encountered - #[fail(display = "invalid base58 character 0x{:x}", _0)] + #[error("invalid base58 character 0x{0:x}")] BadByte(u8), /// Checksum was not correct (expected, actual) - #[fail( - display = "base58ck checksum 0x{:x} does not match expected 0x{:x}", - _0, _1 - )] + #[error("base58ck checksum 0x{0:x} does not match expected 0x{1:x}")] BadChecksum(u32, u32), /// The length (in bytes) of the object was not correct /// Note that if the length is excessively long the provided length may be /// an estimate (and the checksum step may be skipped). - #[fail(display = "length {} invalid for this base58 type", _0)] + #[error("length {0} invalid for this base58 type")] InvalidLength(usize), /// Version byte(s) were not recognized - #[fail(display = "version {:?} invalid for this base58 type", _0)] + #[error("version {0:?} invalid for this base58 type")] InvalidVersion(Vec), /// Checked data was less than 4 bytes - #[fail(display = "b58ck checksum less than 4 bytes, get {}", _0)] + #[error("b58ck checksum less than 4 bytes, get {0}")] TooShort(usize), /// Any other error - #[fail(display = "base58 error, {}", _0)] + #[error("base58 error, {0}")] Other(String), } diff --git a/keychain/src/extkey_bip32.rs b/keychain/src/extkey_bip32.rs index 16297f6905..9d615332dc 100644 --- a/keychain/src/extkey_bip32.rs +++ b/keychain/src/extkey_bip32.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -44,7 +44,6 @@ use byteorder::{BigEndian, ByteOrder, ReadBytesExt}; use digest::generic_array::GenericArray; use digest::Digest; -use failure::Fail; use hmac::{Hmac, Mac, NewMac}; use ripemd160::Ripemd160; use sha2::{Sha256, Sha512}; @@ -132,7 +131,7 @@ impl BIP32Hasher for BIP32GrinHasher { } fn result_sha512(&mut self) -> [u8; 64] { let mut result = [0; 64]; - result.copy_from_slice(self.hmac_sha512.clone().finalize().into_bytes().as_slice()); + result.copy_from_slice(&self.hmac_sha512.to_owned().finalize().into_bytes()); result } fn sha_256(&self, input: &[u8]) -> [u8; 32] { @@ -298,19 +297,19 @@ impl serde::Serialize for ChildNumber { } /// A BIP32 error -#[derive(Fail, Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] +#[derive(thiserror::Error, Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] pub enum Error { /// A pk->pk derivation was attempted on a hardened key - #[fail(display = "cannot derive hardened key from public key")] + #[error("cannot derive hardened key from public key")] CannotDeriveFromHardenedKey, /// A secp256k1 error occured - #[fail(display = "secp256k1 error {}", _0)] + #[error("secp256k1 error {0}")] Ecdsa(secp::Error), /// Error creating a master seed --- for application use - #[fail(display = "rng error {}", _0)] + #[error("rng error {0}")] RngError(String), /// Error converting mnemonic to seed - #[fail(display = "Mnemonic error, {}", _0)] + #[error("Mnemonic error, {0}")] MnemonicError(mnemonic::Error), } @@ -323,7 +322,7 @@ impl From<mnemonic::Error> for Error { impl ExtendedPrivKey { /// Construct a new master key from a seed value pub fn new_master<H>( - _secp: &Secp256k1, + secp: &Secp256k1, hasher: &mut H, seed: &[u8], ) -> Result<ExtendedPrivKey, Error> @@ -339,7 +338,7 @@ impl ExtendedPrivKey { depth: 0, parent_fingerprint: Default::default(), child_number: ChildNumber::from_normal_idx(0), - secret_key: SecretKey::from_slice(&result[..32]).map_err(Error::Ecdsa)?, + secret_key: SecretKey::from_slice(secp, &result[..32]).map_err(Error::Ecdsa)?, chain_code: ChainCode::from(&result[32..]), }) } @@ -390,7 +389,8 @@ impl ExtendedPrivKey { ChildNumber::Normal { .. } => { // Non-hardened key: compute public data and use that hasher.append_sha512( - &PublicKey::from_secret_key(secp, &self.secret_key)?.serialize_vec(true)[..], + &PublicKey::from_secret_key(secp, &self.secret_key)?.serialize_vec(secp, true) + [..], ); } ChildNumber::Hardened { .. } => { @@ -403,8 +403,9 @@ hasher.append_sha512(&be_n); let result = hasher.result_sha512(); - let mut sk = SecretKey::from_slice(&result[..32]).map_err(Error::Ecdsa)?; - sk.add_assign(&self.secret_key).map_err(Error::Ecdsa)?; + let mut sk = SecretKey::from_slice(secp, &result[..32]).map_err(Error::Ecdsa)?; + sk.add_assign(secp, &self.secret_key) + .map_err(Error::Ecdsa)?; Ok(ExtendedPrivKey { network: self.network, @@ -425,7 +426,7 @@ // Compute extended public key let pk: ExtendedPubKey = ExtendedPubKey::from_private::<H>(&secp, self, hasher); // Do SHA256 of just the ECDSA pubkey - let sha2_res = hasher.sha_256(&pk.public_key.serialize_vec(true)[..]); + let sha2_res = hasher.sha_256(&pk.public_key.serialize_vec(&secp, true)[..]); // do RIPEMD160 hasher.ripemd_160(&sha2_res) } @@ -475,7 +476,7 @@ /// Compute the scalar tweak added to this key to get a child key pub fn ckd_pub_tweak<H>( &self, - _secp: &Secp256k1, + secp: &Secp256k1, hasher: &mut H, i: ChildNumber, ) -> Result<(SecretKey, ChainCode), Error> @@ -486,14 +487,14 @@ ChildNumber::Hardened { .. 
} => Err(Error::CannotDeriveFromHardenedKey), ChildNumber::Normal { index: n } => { hasher.init_sha512(&self.chain_code[..]); - hasher.append_sha512(&self.public_key.serialize_vec(true)[..]); + hasher.append_sha512(&self.public_key.serialize_vec(secp, true)[..]); let mut be_n = [0; 4]; BigEndian::write_u32(&mut be_n, n); hasher.append_sha512(&be_n); let result = hasher.result_sha512(); - let secret_key = SecretKey::from_slice(&result[..32])?; + let secret_key = SecretKey::from_slice(secp, &result[..32])?; let chain_code = ChainCode::from(&result[32..]); Ok((secret_key, chain_code)) } @@ -525,12 +526,12 @@ } /// Returns the HASH160 of the chaincode - pub fn identifier<H>(&self, _secp: &Secp256k1, hasher: &mut H) -> [u8; 20] + pub fn identifier<H>(&self, secp: &Secp256k1, hasher: &mut H) -> [u8; 20] where H: BIP32Hasher, { // Do SHA256 of just the ECDSA pubkey - let sha2_res = hasher.sha_256(&self.public_key.serialize_vec(true)[..]); + let sha2_res = hasher.sha_256(&self.public_key.serialize_vec(secp, true)[..]); // do RIPEMD160 hasher.ripemd_160(&sha2_res) } @@ -564,6 +565,7 @@ impl FromStr for ExtendedPrivKey { type Err = base58::Error; fn from_str(inp: &str) -> Result<ExtendedPrivKey, base58::Error> { + let s = Secp256k1::without_caps(); let data = base58::from_check(inp)?; if data.len() != 78 { @@ -584,7 +586,7 @@ parent_fingerprint: Fingerprint::from(&data[5..9]), child_number: child_number, chain_code: ChainCode::from(&data[13..45]), - secret_key: SecretKey::from_slice(&data[46..78]) + secret_key: SecretKey::from_slice(&s, &data[46..78]) .map_err(|e| base58::Error::Other(format!("Unable to read priv key, {}", e)))?, }) } @@ -592,6 +594,7 @@ impl fmt::Display for ExtendedPubKey { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let secp = Secp256k1::without_caps(); let mut ret = [0; 78]; ret[0..4].copy_from_slice(&self.network[0..4]); ret[4] = self.depth as u8; @@ -600,7 +603,7 @@ BigEndian::write_u32(&mut ret[9..13], u32::from(self.child_number)); ret[13..45].copy_from_slice(&self.chain_code[..]); - ret[45..78].copy_from_slice(&self.public_key.serialize_vec(true)[..]); + ret[45..78].copy_from_slice(&self.public_key.serialize_vec(&secp, true)[..]); fmt.write_str(&base58::check_encode_slice(&ret[..])) } } @@ -609,6 +612,7 @@ impl FromStr for ExtendedPubKey { type Err = base58::Error; fn from_str(inp: &str) -> Result<ExtendedPubKey, base58::Error> { + let s = Secp256k1::without_caps(); let data = base58::from_check(inp)?; if data.len() != 78 { @@ -629,7 +633,7 @@ parent_fingerprint: Fingerprint::from(&data[5..9]), child_number: child_number, chain_code: ChainCode::from(&data[13..45]), - public_key: PublicKey::from_slice(&data[45..78]) + public_key: PublicKey::from_slice(&s, &data[45..78]) .map_err(|e| base58::Error::Other(format!("Unable to read pub key, {}", e)))?, }) } @@ -648,7 +652,7 @@ mod tests { use digest::generic_array::GenericArray; use digest::Digest; - use hmac::{Hmac, Mac, NewMac}; + use hmac::{Hmac, Mac}; use ripemd160::Ripemd160; use sha2::{Sha256, Sha512}; @@ -687,7 +691,7 @@ } fn result_sha512(&mut self) -> [u8; 64] { let mut result = [0; 64]; - result.copy_from_slice(self.hmac_sha512.clone().finalize().into_bytes().as_slice()); + result.copy_from_slice(&self.hmac_sha512.to_owned().finalize().into_bytes()); result } fn sha_256(&self, input: &[u8]) -> [u8; 32] { diff --git a/keychain/src/keychain.rs index 
9973e8f77c..810feef079 100644 --- a/keychain/src/keychain.rs +++ b/keychain/src/keychain.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -166,18 +166,18 @@ impl Keychain for ExtKeychain { let keys = blind_sum .positive_blinding_factors .iter() - .filter_map(|b| b.secret_key().ok()) + .filter_map(|b| b.secret_key(&self.secp).ok()) .collect::<Vec<_>>(); pos_keys.extend(keys); let keys = blind_sum .negative_blinding_factors .iter() - .filter_map(|b| b.secret_key().ok()) + .filter_map(|b| b.secret_key(&self.secp).ok()) .collect::<Vec<_>>(); neg_keys.extend(keys); - let sum = secp::Secp256k1::blind_sum(pos_keys, neg_keys)?; + let sum = self.secp.blind_sum(pos_keys, neg_keys)?; Ok(BlindingFactor::from_secret_key(sum)) } @@ -198,7 +198,7 @@ impl Keychain for ExtKeychain { msg: &Message, blinding: &BlindingFactor, ) -> Result<Signature, Error> { - let skey = &blinding.secret_key()?; + let skey = &blinding.secret_key(&self.secp)?; let sig = self.secp.sign(msg, &skey)?; Ok(sig) } @@ -245,21 +245,27 @@ mod test { fn secret_key_addition() { let keychain = ExtKeychain::from_random_seed(false).unwrap(); - let skey1 = SecretKey::from_slice(&[ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, - ]) + let skey1 = SecretKey::from_slice( + &keychain.secp, + &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ], + ) .unwrap(); - let skey2 = SecretKey::from_slice(&[ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 2, - ]) + let skey2 = SecretKey::from_slice( + &keychain.secp, + &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 2, + ], + ) .unwrap(); // adding secret keys 1 and 2 to give secret key 3 let mut skey3 = skey1.clone(); - skey3.add_assign(&skey2).unwrap(); + skey3.add_assign(&keychain.secp, &skey2).unwrap(); // create commitments for secret keys 1, 2 and 3 // all committing to the value 0 (which is what we do for tx_kernels) @@ -268,7 +274,10 @@ let commit_3 = keychain.secp.commit(0, skey3.clone()).unwrap(); // now sum commitments for keys 1 and 2 - let sum = secp::Secp256k1::commit_sum(vec![commit_1, commit_2], vec![]).unwrap(); + let sum = keychain + .secp + .commit_sum(vec![commit_1, commit_2], vec![]) + .unwrap(); // confirm the commitment for key 3 matches the sum of the commitments 1 and 2 assert_eq!(sum, commit_3); diff --git a/keychain/src/lib.rs b/keychain/src/lib.rs index 3d61054d02..d65d9bffcc 100644 --- a/keychain/src/lib.rs +++ b/keychain/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/keychain/src/mnemonic.rs b/keychain/src/mnemonic.rs index 51ef3d86dd..f33c832515 100644 --- a/keychain/src/mnemonic.rs +++ b/keychain/src/mnemonic.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,7 +18,6 @@ //! 
at https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki use digest::Digest; -use failure::Fail; use hmac::Hmac; use pbkdf2::pbkdf2; use sha2::{Sha256, Sha512}; @@ -29,19 +28,16 @@ lazy_static! { } /// An error that might occur during mnemonic decoding -#[derive(Fail, Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +#[derive(thiserror::Error, Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub enum Error { /// Invalid word encountered - #[fail(display = "invalid bip39 word {}", _0)] + #[error("invalid bip39 word {0}")] BadWord(String), /// Checksum was not correct (expected, actual) - #[fail( - display = "bip39 checksum 0x{:x} does not match expected 0x{:x}", - _0, _1 - )] + #[error("bip39 checksum 0x{0:x} does not match expected 0x{1:x}")] BadChecksum(u8, u8), /// The number of words/bytes was invalid - #[fail(display = "invalid mnemonic/entropy length {}", _0)] + #[error("invalid mnemonic/entropy length {0}")] InvalidLength(usize), } @@ -91,7 +87,7 @@ pub fn to_entropy(mnemonic: &str) -> Result, Error> { let mut hash = [0; 32]; let mut sha2sum = Sha256::default(); - sha2sum.update(&entropy.clone()); + sha2sum.update(&entropy); hash.copy_from_slice(sha2sum.finalize().as_slice()); let actual = (hash[0] >> (8 - checksum_bits)) & mask; diff --git a/keychain/src/types.rs b/keychain/src/types.rs index e6dddd73b7..ea92285db7 100644 --- a/keychain/src/types.rs +++ b/keychain/src/types.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -33,7 +33,6 @@ use crate::util::secp::key::{PublicKey, SecretKey, ZERO_KEY}; use crate::util::secp::pedersen::Commitment; use crate::util::secp::{self, Message, Secp256k1, Signature}; use crate::util::ToHex; -use failure::Fail; use zeroize::Zeroize; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; @@ -41,19 +40,19 @@ use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; // Size of an identifier in bytes pub const IDENTIFIER_SIZE: usize = 17; -#[derive(Fail, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +#[derive(thiserror::Error, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] pub enum Error { - #[fail(display = "Keychain secp error, {}", _0)] + #[error("Keychain secp error, {0}")] Secp(String), - #[fail(display = "Keychain derivation key error, {}", _0)] + #[error("Keychain derivation key error, {0}")] KeyDerivation(extkey_bip32::Error), - #[fail(display = "Keychain Transaction error, {}", _0)] + #[error("Keychain Transaction error, {0}")] Transaction(String), - #[fail(display = "Keychain range proof error, {}", _0)] + #[error("Keychain range proof error, {0}")] RangeProof(String), - #[fail(display = "Keychain unknown commitment type")] - SwitchCommitmentType, - #[fail(display = "Keychain generic error, {}", _0)] + #[error("Keychain unknown commitment type")] + SwitchCommitment, + #[error("Keychain generic error, {0}")] GenericError(String), } @@ -173,8 +172,8 @@ impl Identifier { self.0 } - pub fn from_pubkey(pubkey: &PublicKey) -> Identifier { - let bytes = pubkey.serialize_vec(true); + pub fn from_pubkey(secp: &Secp256k1, pubkey: &PublicKey) -> Identifier { + let bytes = pubkey.serialize_vec(secp, true); let identifier = blake2b(IDENTIFIER_SIZE, &[], &bytes[..]); Identifier::from_bytes(&identifier.as_bytes()) } @@ -188,7 +187,7 @@ impl Identifier { pub fn from_secret_key(secp: &Secp256k1, key: &SecretKey) -> Result { let key_id = 
PublicKey::from_secret_key(secp, key) .map_err(|e| Error::Secp(format!("{}", e.description())))?; - Ok(Identifier::from_pubkey(&key_id)) + Ok(Identifier::from_pubkey(secp, &key_id)) } pub fn from_hex(hex: &str) -> Result { @@ -264,8 +263,8 @@ impl BlindingFactor { self.0 == ZERO_KEY.as_ref() } - pub fn rand() -> BlindingFactor { - BlindingFactor::from_secret_key(SecretKey::new(&mut thread_rng())) + pub fn rand(secp: &Secp256k1) -> BlindingFactor { + BlindingFactor::from_secret_key(SecretKey::new(secp, &mut thread_rng())) } pub fn from_hex(hex: &str) -> Result { @@ -278,26 +277,27 @@ impl BlindingFactor { // Handle "zero" blinding_factor correctly, by returning the "zero" key. // We need this for some of the tests. - pub fn secret_key(&self) -> Result { + pub fn secret_key(&self, secp: &Secp256k1) -> Result { if self.is_zero() { Ok(ZERO_KEY) } else { - SecretKey::from_slice(&self.0).map_err(|e| Error::Secp(format!("{}", e.description()))) + SecretKey::from_slice(secp, &self.0) + .map_err(|e| Error::Secp(format!("{}", e.description()))) } } // Convenient (and robust) way to add two blinding_factors together. // Handles "zero" blinding_factors correctly. - pub fn add(&self, other: &BlindingFactor) -> Result { + pub fn add(&self, other: &BlindingFactor, secp: &Secp256k1) -> Result { let keys = vec![self, other] .into_iter() .filter(|x| !x.is_zero()) - .filter_map(|x| x.secret_key().ok()) + .filter_map(|x| x.secret_key(secp).ok()) .collect::>(); if keys.is_empty() { Ok(BlindingFactor::zero()) } else { - let sum = Secp256k1::blind_sum(keys, vec![])?; + let sum = secp.blind_sum(keys, vec![])?; Ok(BlindingFactor::from_secret_key(sum)) } } @@ -308,11 +308,15 @@ impl BlindingFactor { /// This prevents an actor from being able to sum a set of inputs, outputs /// and kernels from a block to identify and reconstruct a particular tx /// from a block. You would need both k1, k2 to do this. - pub fn split(&self, blind_1: &BlindingFactor) -> Result { + pub fn split( + &self, + blind_1: &BlindingFactor, + secp: &Secp256k1, + ) -> Result { // use blind_sum to subtract skey_1 from our key such that skey = skey_1 + skey_2 - let skey = self.secret_key()?; - let skey_1 = blind_1.secret_key()?; - let skey_2 = Secp256k1::blind_sum(vec![skey], vec![skey_1])?; + let skey = self.secret_key(secp)?; + let skey_1 = blind_1.secret_key(secp)?; + let skey_2 = secp.blind_sum(vec![skey], vec![skey_1])?; Ok(BlindingFactor::from_secret_key(skey_2)) } } @@ -517,6 +521,7 @@ mod test { use crate::types::{BlindingFactor, ExtKeychainPath, Identifier}; use crate::util::secp::constants::SECRET_KEY_SIZE; use crate::util::secp::key::{SecretKey, ZERO_KEY}; + use crate::util::secp::Secp256k1; use std::slice::from_raw_parts; // This tests cleaning of BlindingFactor (e.g. secret key) on Drop. 
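Taken together, the BlindingFactor changes in this file thread an explicit Secp256k1 context through secret_key(), add() and split(). A minimal sketch of the invariant those methods preserve — split() hands back k2 with k = k1 + k2, and add() recombines the shares — assuming the crate paths in this diff and the secp fork's re-exported rand (the reworked split_blinding_factor test in the next hunk checks the same property):

use grin_keychain::types::BlindingFactor;
use grin_util::secp::key::SecretKey;
use grin_util::secp::rand::thread_rng;
use grin_util::secp::Secp256k1;

fn main() {
    let secp = Secp256k1::new();
    // k is the factor to hide; k1 is a random share of it.
    let skey = SecretKey::new(&secp, &mut thread_rng());
    let k = BlindingFactor::from_secret_key(skey.clone());
    let k1 = BlindingFactor::rand(&secp);
    // split() returns k2 such that k = k1 + k2; neither share alone reveals k.
    let k2 = k.split(&k1, &secp).unwrap();
    // add() recombines the shares; the sum must equal the original key.
    let sum = k1.add(&k2, &secp).unwrap();
    assert_eq!(skey, sum.secret_key(&secp).unwrap());
}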
@@ -550,14 +555,15 @@ mod test { // split a key, sum the split keys and confirm the sum matches the original key #[test] fn split_blinding_factor() { - let skey_in = SecretKey::new(&mut thread_rng()); + let secp = Secp256k1::new(); + let skey_in = SecretKey::new(&secp, &mut thread_rng()); let blind = BlindingFactor::from_secret_key(skey_in.clone()); - let blind_1 = BlindingFactor::rand(); - let blind_2 = blind.split(&blind_1).unwrap(); + let blind_1 = BlindingFactor::rand(&secp); + let blind_2 = blind.split(&blind_1, &secp).unwrap(); - let mut skey_sum = blind_1.secret_key().unwrap(); - let skey_2 = blind_2.secret_key().unwrap(); - skey_sum.add_assign(&skey_2).unwrap(); + let mut skey_sum = blind_1.secret_key(&secp).unwrap(); + let skey_2 = blind_2.secret_key(&secp).unwrap(); + skey_sum.add_assign(&secp, &skey_2).unwrap(); assert_eq!(skey_in, skey_sum); } @@ -565,11 +571,12 @@ mod test { // the same key that we started with (k + 0 = k) #[test] fn zero_key_addition() { - let skey_in = SecretKey::new(&mut thread_rng()); + let secp = Secp256k1::new(); + let skey_in = SecretKey::new(&secp, &mut thread_rng()); let skey_zero = ZERO_KEY; let mut skey_out = skey_in.clone(); - skey_out.add_assign(&skey_zero).unwrap(); + skey_out.add_assign(&secp, &skey_zero).unwrap(); assert_eq!(skey_in, skey_out); } diff --git a/keychain/src/view_key.rs b/keychain/src/view_key.rs index d605681496..b09e390a2c 100644 --- a/keychain/src/view_key.rs +++ b/keychain/src/view_key.rs @@ -1,3 +1,17 @@ +// Copyright 2021 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + use crate::blake2::blake2b::blake2b; use byteorder::{BigEndian, ByteOrder}; //use crate::sha2::{Digest, Sha256}; @@ -66,7 +80,7 @@ impl ViewKey { switch_public_key.mul_assign(secp, &ext_key.secret_key)?; let switch_public_key = Some(switch_public_key); - let rewind_hash = Self::rewind_hash(keychain.public_root_key()); + let rewind_hash = Self::rewind_hash(secp, keychain.public_root_key()); Ok(Self { is_floo, @@ -80,13 +94,14 @@ impl ViewKey { }) } - pub fn rewind_hash(public_root_key: PublicKey) -> Vec { - let ser = public_root_key.serialize_vec(true); + pub fn rewind_hash(secp: &Secp256k1, public_root_key: PublicKey) -> Vec { + let ser = public_root_key.serialize_vec(secp, true); blake2b(32, &[], &ser[..]).as_bytes().to_vec() } fn ckd_pub_tweak( &self, + secp: &Secp256k1, hasher: &mut H, i: ChildNumber, ) -> Result<(SecretKey, ChainCode), Error> @@ -97,14 +112,14 @@ impl ViewKey { ChildNumber::Hardened { .. 
} => Err(BIP32Error::CannotDeriveFromHardenedKey.into()), ChildNumber::Normal { index: n } => { hasher.init_sha512(&self.chain_code[..]); - hasher.append_sha512(&self.public_key.serialize_vec(true)[..]); + hasher.append_sha512(&self.public_key.serialize_vec(secp, true)[..]); let mut be_n = [0; 4]; BigEndian::write_u32(&mut be_n, n); hasher.append_sha512(&be_n); let result = hasher.result_sha512(); - let secret_key = SecretKey::from_slice(&result[..32])?; + let secret_key = SecretKey::from_slice(secp, &result[..32])?; let chain_code = ChainCode::from(&result[32..]); Ok((secret_key, chain_code)) } @@ -120,7 +135,7 @@ impl ViewKey { where H: BIP32Hasher, { - let (secret_key, chain_code) = self.ckd_pub_tweak(hasher, i)?; + let (secret_key, chain_code) = self.ckd_pub_tweak(secp, hasher, i)?; let mut public_key = self.public_key; public_key.add_exp_assign(secp, &secret_key)?; @@ -129,7 +144,7 @@ impl ViewKey { Some(p) => { let mut j = PublicKey(ffi::PublicKey(GENERATOR_PUB_J_RAW)); j.mul_assign(secp, &secret_key)?; - Some(PublicKey::from_combination(vec![p, &j])?) + Some(PublicKey::from_combination(secp, vec![p, &j])?) } None => None, }; @@ -137,7 +152,7 @@ impl ViewKey { Ok(Self { is_floo: self.is_floo, depth: self.depth + 1, - parent_fingerprint: self.fingerprint(hasher), + parent_fingerprint: self.fingerprint(secp, hasher), child_number: i, public_key, switch_public_key, @@ -152,8 +167,8 @@ impl ViewKey { amount: u64, switch: SwitchCommitmentType, ) -> Result { - let value_key = secp.commit_value(amount)?.to_pubkey()?; - let pub_key = PublicKey::from_combination(vec![&self.public_key, &value_key])?; + let value_key = secp.commit_value(amount)?.to_pubkey(secp)?; + let pub_key = PublicKey::from_combination(secp, vec![&self.public_key, &value_key])?; match switch { SwitchCommitmentType::None => Ok(pub_key), SwitchCommitmentType::Regular => { @@ -172,23 +187,23 @@ impl ViewKey { pub_key.add_exp_assign(secp, &blind)?; Ok(pub_key)*/ - Err(Error::SwitchCommitmentType) + Err(Error::SwitchCommitment) } } } - fn identifier(&self, hasher: &mut H) -> [u8; 20] + fn identifier(&self, secp: &Secp256k1, hasher: &mut H) -> [u8; 20] where H: BIP32Hasher, { - let sha2_res = hasher.sha_256(&self.public_key.serialize_vec(true)[..]); + let sha2_res = hasher.sha_256(&self.public_key.serialize_vec(secp, true)[..]); hasher.ripemd_160(&sha2_res) } - fn fingerprint(&self, hasher: &mut H) -> Fingerprint + fn fingerprint(&self, secp: &Secp256k1, hasher: &mut H) -> Fingerprint where H: BIP32Hasher, { - Fingerprint::from(&self.identifier(hasher)[0..4]) + Fingerprint::from(&self.identifier(secp, hasher)[0..4]) } } diff --git a/p2p/Cargo.toml b/p2p/Cargo.toml index cbf33a4c22..9d09d73bac 100644 --- a/p2p/Cargo.toml +++ b/p2p/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grin_p2p" -version = "4.4.2" +version = "5.3.2" authors = ["Grin Developers "] description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format." 
license = "Apache-2.0" @@ -16,8 +16,7 @@ lru-cache = "0.1" tor-stream = "0.2" net2 = "0.2" socks = "0.3.2" -failure = "0.1" -failure_derive = "0.1" +thiserror = "1" num = "0.2" rand = "0.6" serde = "1" @@ -35,11 +34,12 @@ async-std = "1.9" tokio = {version = "0.2", features = ["full"] } ed25519-dalek = "1" serde_json = "1" +bytes = "0.5" -grin_core = { path = "../core", version = "4.4.2" } -grin_store = { path = "../store", version = "4.4.2" } -grin_util = { path = "../util", version = "4.4.2" } -grin_chain = { path = "../chain", version = "4.4.2" } +grin_core = { path = "../core", version = "5.3.2" } +grin_store = { path = "../store", version = "5.3.2" } +grin_util = { path = "../util", version = "5.3.2" } +grin_chain = { path = "../chain", version = "5.3.2" } [dev-dependencies] -grin_pool = { path = "../pool", version = "4.4.2" } \ No newline at end of file +grin_pool = { path = "../pool", version = "5.3.2" } diff --git a/p2p/src/codec.rs b/p2p/src/codec.rs new file mode 100644 index 0000000000..795dabb0be --- /dev/null +++ b/p2p/src/codec.rs @@ -0,0 +1,288 @@ +// Copyright 2021 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Provides a connection wrapper that handles the lower level tasks in sending +//! or receiving data from the TCP socket, as well as dealing with timeouts. +//! +//! Because of a few idiosyncracies in the Rust `TcpStream`, this has to use +//! async I/O to be able to both read *and* write on the connection. Which +//! forces us to go through some additional gymnastic to loop over the async +//! stream and make sure we get the right number of bytes out. 
+ +use crate::grin_core::global::header_size_bytes; +use crate::grin_core::ser::{BufReader, ProtocolVersion, Readable}; +use crate::msg::{Message, MsgHeader, MsgHeaderWrapper, Type}; +use crate::types::{AttachmentMeta, AttachmentUpdate, Error}; +use crate::{ + grin_core::core::block::{BlockHeader, UntrustedBlockHeader}, + msg::HeadersData, +}; +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use grin_core::ser::Reader; +use std::cmp::min; +use std::io::Read; +use std::mem; +use std::net::TcpStream; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use MsgHeaderWrapper::*; +use State::*; + +const HEADER_IO_TIMEOUT: Duration = Duration::from_millis(2000); +pub const BODY_IO_TIMEOUT: Duration = Duration::from_millis(60000); +const HEADER_BATCH_SIZE: usize = 32; + +enum State { + None, + Header(MsgHeaderWrapper), + BlockHeaders { + bytes_left: usize, + items_left: usize, + headers: Vec, + }, + Attachment(usize, Arc, Instant), +} + +impl State { + fn is_none(&self) -> bool { + match self { + State::None => true, + _ => false, + } + } +} + +pub struct Codec { + pub version: ProtocolVersion, + stream: TcpStream, + buffer: BytesMut, + state: State, + bytes_read: usize, +} + +impl Codec { + pub fn new(version: ProtocolVersion, stream: TcpStream) -> Self { + Self { + version, + stream, + buffer: BytesMut::with_capacity(8 * 1024), + state: None, + bytes_read: 0, + } + } + + /// Destroy the codec and return the reader + pub fn stream(self) -> TcpStream { + self.stream + } + + /// Inform codec next `len` bytes are an attachment + /// Panics if already reading a body + pub fn expect_attachment(&mut self, meta: Arc) { + assert!(self.state.is_none()); + self.state = Attachment(meta.size, meta, Instant::now()); + } + + /// Length of the next item we are expecting, could be msg header, body, block header or attachment chunk + fn next_len(&self) -> usize { + match &self.state { + None => MsgHeader::LEN, + Header(Known(h)) if h.msg_type == Type::Headers => { + // If we are receiving a list of headers, read off the item count first + min(h.msg_len as usize, 2) + } + Header(Known(header)) => header.msg_len as usize, + Header(Unknown(len, _)) => *len as usize, + BlockHeaders { bytes_left, .. } => { + // The header length varies with the number of edge bits. 
Therefore we overestimate + // its size and only actually read the bytes we need + min(*bytes_left, header_size_bytes(63)) + } + Attachment(left, _, _) => min(*left, 48_000), + } + } + + /// Set stream timeout depending on the next expected item + fn set_stream_timeout(&self) -> Result<(), Error> { + let timeout = match &self.state { + None => HEADER_IO_TIMEOUT, + _ => BODY_IO_TIMEOUT, + }; + self.stream.set_read_timeout(Some(timeout))?; + Ok(()) + } + + fn read_inner(&mut self) -> Result { + self.bytes_read = 0; + loop { + let next_len = self.next_len(); + let pre_len = self.buffer.len(); + // Buffer could already be partially filled, calculate additional bytes we need + let to_read = next_len.saturating_sub(pre_len); + if to_read > 0 { + self.buffer.reserve(to_read); + for _ in 0..to_read { + self.buffer.put_u8(0); + } + self.set_stream_timeout()?; + if let Err(e) = self.stream.read_exact(&mut self.buffer[pre_len..]) { + // Undo reserved bytes on a failed read + self.buffer.truncate(pre_len); + return Err(e.into()); + } + self.bytes_read += to_read; + } + match &mut self.state { + None => { + // Parse header and keep reading + let mut raw = self.buffer.split_to(next_len).freeze(); + let mut reader = BufReader::new(&mut raw, self.version); + let header = MsgHeaderWrapper::read(&mut reader)?; + self.state = Header(header); + } + Header(Known(header)) => { + let mut raw = self.buffer.split_to(next_len).freeze(); + if header.msg_type == Type::Headers { + // Special consideration for a list of headers, as we want to verify and process + // them as they come in instead of only after the full list has been received + let mut reader = BufReader::new(&mut raw, self.version); + let items_left = reader.read_u16()? as usize; + self.state = BlockHeaders { + bytes_left: header.msg_len as usize - 2, + items_left, + headers: Vec::with_capacity(min(HEADER_BATCH_SIZE, items_left)), + }; + } else { + // Return full message + let msg = decode_message(header, &mut raw, self.version); + self.state = None; + return msg; + } + } + Header(Unknown(_, msg_type)) => { + // Discard body and return + let msg_type = *msg_type; + self.buffer.advance(next_len); + self.state = None; + return Ok(Message::Unknown(msg_type)); + } + BlockHeaders { + bytes_left, + items_left, + headers, + } => { + if *bytes_left == 0 { + // Incorrect item count + self.state = None; + return Err(Error::BadMessage); + } + + let mut reader = BufReader::new(&mut self.buffer, self.version); + let header: UntrustedBlockHeader = reader.body()?; + let bytes_read = reader.bytes_read() as usize; + headers.push(header.into()); + *bytes_left = bytes_left.saturating_sub(bytes_read); + *items_left -= 1; + let remaining = *items_left as u64; + if headers.len() == HEADER_BATCH_SIZE || remaining == 0 { + let mut h = Vec::with_capacity(min(HEADER_BATCH_SIZE, *items_left)); + mem::swap(headers, &mut h); + if remaining == 0 { + let bytes_left = *bytes_left; + self.state = None; + if bytes_left > 0 { + return Err(Error::BadMessage); + } + } + return Ok(Message::Headers(HeadersData { + headers: h, + remaining, + })); + } + } + Attachment(left, meta, now) => { + let raw = self.buffer.split_to(next_len).freeze(); + *left -= next_len; + if now.elapsed().as_secs() > 10 { + *now = Instant::now(); + debug!("attachment: {}/{}", meta.size - *left, meta.size); + } + let update = AttachmentUpdate { + read: next_len, + left: *left, + meta: Arc::clone(meta), + }; + if *left == 0 { + self.state = None; + debug!("attachment: DONE"); + } + return Ok(Message::Attachment(update, 
Some(raw))); + } + } + } + } + + /// Blocking read of the next message + pub fn read(&mut self) -> (Result, u64) { + let msg = self.read_inner(); + (msg, self.bytes_read as u64) + } +} + +// TODO: replace with a macro? +fn decode_message( + header: &MsgHeader, + body: &mut Bytes, + version: ProtocolVersion, +) -> Result { + let mut msg = BufReader::new(body, version); + let c = match header.msg_type { + Type::Ping => Message::Ping(msg.body()?), + Type::Pong => Message::Pong(msg.body()?), + Type::BanReason => Message::BanReason(msg.body()?), + Type::TransactionKernel => Message::TransactionKernel(msg.body()?), + Type::GetTransaction => Message::GetTransaction(msg.body()?), + Type::Transaction => Message::Transaction(msg.body()?), + Type::StemTransaction => Message::StemTransaction(msg.body()?), + Type::GetBlock => Message::GetBlock(msg.body()?), + Type::Block => Message::Block(msg.body()?), + Type::GetCompactBlock => Message::GetCompactBlock(msg.body()?), + Type::CompactBlock => Message::CompactBlock(msg.body()?), + Type::GetHeaders => Message::GetHeaders(msg.body()?), + Type::Header => Message::Header(msg.body()?), + Type::GetPeerAddrs => Message::GetPeerAddrs(msg.body()?), + Type::PeerAddrs => Message::PeerAddrs(msg.body()?), + Type::TxHashSetRequest => Message::TxHashSetRequest(msg.body()?), + Type::TxHashSetArchive => Message::TxHashSetArchive(msg.body()?), + Type::GetOutputBitmapSegment => Message::GetOutputBitmapSegment(msg.body()?), + Type::OutputBitmapSegment => Message::OutputBitmapSegment(msg.body()?), + Type::StartPibdSyncRequest => Message::StartPibdSyncRequest(msg.body()?), + Type::PibdSyncState => Message::PibdSyncState(msg.body()?), + Type::GetOutputSegment => Message::GetOutputSegment(msg.body()?), + Type::OutputSegment => Message::OutputSegment(msg.body()?), + Type::GetRangeProofSegment => Message::GetRangeProofSegment(msg.body()?), + Type::RangeProofSegment => Message::RangeProofSegment(msg.body()?), + Type::GetKernelSegment => Message::GetKernelSegment(msg.body()?), + Type::KernelSegment => Message::KernelSegment(msg.body()?), + Type::HasAnotherArchiveHeader => Message::HasAnotherArchiveHeader(msg.body()?), + Type::Error | Type::Hand | Type::Shake | Type::Headers => { + return Err(Error::UnexpectedMessage(format!( + "get message with type {:?} (code {})", + header.msg_type, header.msg_type as u32 + ))) + } + Type::TorAddress => Message::TorAddress(msg.body()?), + }; + Ok(c) +} diff --git a/p2p/src/conn.rs b/p2p/src/conn.rs index f99ac2118f..0d6f78d087 100644 --- a/p2p/src/conn.rs +++ b/p2p/src/conn.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,40 +20,28 @@ //! forces us to go through some additional gymnastic to loop over the async //! stream and make sure we get the right number of bytes out. 
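That completes the codec; conn.rs below is rewritten to drive it. A condensed, hypothetical sketch of the driving loop (Codec, Message, Error and ProtocolVersion are the types defined in this crate; handle() is a stand-in for the MessageHandler plumbing the real peer_read loop uses, which also tracks byte counters and attachment state):

fn read_loop(stream: std::net::TcpStream, version: ProtocolVersion) -> Result<(), Error> {
    let mut codec = Codec::new(version, stream);
    loop {
        // Blocks until one complete item is decoded: a full message, a batch
        // of up to 32 block headers, or one attachment chunk.
        let (next, bytes_read) = codec.read();
        match next? {
            Message::Unknown(t) => debug!("dropped unknown msg type {} ({} bytes)", t, bytes_read),
            message => handle(message)?,
        }
    }
}

// Stand-in for the MessageHandler::consume plumbing.
fn handle(message: Message) -> Result<(), Error> {
    trace!("got {}", message);
    Ok(())
}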
-use crate::core::ser; -use crate::core::ser::ProtocolVersion; -use crate::msg::{ - read_body, read_discard, read_header, read_item, write_message, Msg, MsgHeader, - MsgHeaderWrapper, -}; +use crate::codec::{Codec, BODY_IO_TIMEOUT}; +use crate::grin_core::ser::ProtocolVersion; +use crate::msg::{write_message, Consumed, Message, Msg}; use crate::types::Error; use crate::util::{RateCounter, RwLock}; -use std::io::{self, Read, Write}; +use std::fs::File; +use std::io::{self, Write}; use std::net::{Shutdown, TcpStream}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::RecvTimeoutError; use std::sync::{mpsc, Arc}; +use std::thread::{self, JoinHandle}; use std::time::Duration; -use std::{ - cmp, - thread::{self, JoinHandle}, -}; pub const SEND_CHANNEL_CAP: usize = 100; -const HEADER_IO_TIMEOUT: Duration = Duration::from_millis(10000); const CHANNEL_TIMEOUT: Duration = Duration::from_millis(15000); -const BODY_IO_TIMEOUT: Duration = Duration::from_millis(90000); /// A trait to be implemented in order to receive messages from the /// connection. Allows providing an optional response. pub trait MessageHandler: Send + 'static { - fn consume<'a, R: Read>( - &mut self, - msg: Message<'a, R>, - stopped: Arc, - tracker: Arc, - ) -> Result, Error>; + fn consume(&self, message: Message) -> Result; } // Macro to simplify the boilerplate around I/O and Grin error handling @@ -79,54 +67,6 @@ macro_rules! try_break { }; } -macro_rules! try_header { - ($res:expr, $conn: expr) => {{ - let _ = $conn.set_read_timeout(Some(HEADER_IO_TIMEOUT)); - try_break!($res) - }}; -} - -/// A message as received by the connection. Provides access to the message -/// header lazily consumes the message body, handling its deserialization. -pub struct Message<'a, R: Read> { - pub header: MsgHeader, - stream: &'a mut R, - version: ProtocolVersion, -} - -impl<'a, R: Read> Message<'a, R> { - fn from_header(header: MsgHeader, stream: &'a mut R, version: ProtocolVersion) -> Self { - Message { - header, - stream, - version, - } - } - - /// Read the message body from the underlying connection - pub fn body(&mut self) -> Result { - read_body(&self.header, self.stream, self.version) - } - - /// Read a single "thing" from the underlying connection. - /// Return the thing and the total bytes read. - pub fn streaming_read(&mut self) -> Result<(T, u64), Error> { - read_item(self.stream, self.version) - } - - pub fn copy_attachment(&mut self, len: usize, writer: &mut dyn Write) -> Result { - let mut written = 0; - while written < len { - let read_len = cmp::min(8000, len - written); - let mut buf = vec![0u8; read_len]; - self.stream.read_exact(&mut buf[..])?; - writer.write_all(&buf)?; - written += read_len; - } - Ok(written) - } -} - pub struct StopHandle { /// Channel to close the connection stopped: Arc, @@ -272,7 +212,7 @@ fn poll( conn: TcpStream, conn_handle: ConnHandle, version: ProtocolVersion, - mut handler: H, + handler: H, send_rx: mpsc::Receiver, stopped: Arc, tracker: Arc, @@ -281,7 +221,7 @@ where H: MessageHandler, { // Split out tcp stream out into separate reader/writer halves. 
- let mut reader = conn.try_clone().expect("clone conn for reader failed"); + let reader = conn.try_clone().expect("clone conn for reader failed"); let mut writer = conn.try_clone().expect("clone conn for writer failed"); let reader_stopped = stopped.clone(); @@ -291,58 +231,92 @@ where let reader_thread = thread::Builder::new() .name("peer_read".to_string()) .spawn(move || { + let peer_addr = reader + .peer_addr() + .map(|a| a.to_string()) + .unwrap_or_else(|_| "?".to_owned()); + let mut codec = Codec::new(version, reader); + let mut attachment: Option = None; loop { - // check the read end - match try_header!(read_header(&mut reader, version), &reader) { - Some(MsgHeaderWrapper::Known(header)) => { - let _ = reader.set_read_timeout(Some(BODY_IO_TIMEOUT)); - let msg = Message::from_header(header, &mut reader, version); - - trace!( - "Received message header, type {:?}, len {}.", - msg.header.msg_type, - msg.header.msg_len - ); - - // Increase received bytes counter - reader_tracker.inc_received(MsgHeader::LEN as u64 + msg.header.msg_len); + // check the close channel + if reader_stopped.load(Ordering::Relaxed) { + break; + } - let resp_msg = try_break!(handler.consume( - msg, - reader_stopped.clone(), - reader_tracker.clone() - )); - if let Some(Some(resp_msg)) = resp_msg { - try_break!(conn_handle.send(resp_msg)); + // check the read end + let (next, bytes_read) = codec.read(); + + // increase the appropriate counter + match &next { + Ok(Message::Attachment(_, _)) => reader_tracker.inc_quiet_received(bytes_read), + Ok(Message::Headers(data)) => { + // We process a full 512 headers locally in smaller 32 header batches. + // We only want to increment the msg count once for the full 512 headers. + if data.remaining == 0 { + reader_tracker.inc_received(bytes_read); + } else { + reader_tracker.inc_quiet_received(bytes_read); } } - Some(MsgHeaderWrapper::Unknown(msg_len, type_byte)) => { + _ => reader_tracker.inc_received(bytes_read), + } + + let message = match try_break!(next) { + Some(Message::Unknown(type_byte)) => { debug!( - "Received unknown message header, type {:?}, len {}.", - type_byte, msg_len + "Received unknown message, type {:?}, len {}.", + type_byte, bytes_read ); - // Increase received bytes counter - reader_tracker.inc_received(MsgHeader::LEN as u64 + msg_len); + continue; + } + Some(Message::Attachment(update, bytes)) => { + let a = match &mut attachment { + Some(a) => a, + None => { + error!("Received unexpected attachment chunk"); + break; + } + }; + + let bytes = bytes.unwrap(); + if let Err(e) = a.write_all(&bytes) { + error!("Unable to write attachment file: {}", e); + break; + } + if update.left == 0 { + if let Err(e) = a.sync_all() { + error!("Unable to sync attachment file: {}", e); + break; + } + attachment.take(); + } - try_break!(read_discard(msg_len, &mut reader)); + Message::Attachment(update, None) } - None => {} - } + Some(message) => { + trace!("Received message, type {}, len {}.", message, bytes_read); + message + } + None => continue, + }; - // check the close channel - if reader_stopped.load(Ordering::Relaxed) { - break; + let consumed = try_break!(handler.consume(message)).unwrap_or(Consumed::None); + match consumed { + Consumed::Response(resp_msg) => { + try_break!(conn_handle.send(resp_msg)); + } + Consumed::Attachment(meta, file) => { + // Start attachment + codec.expect_attachment(meta); + attachment = Some(file); + } + Consumed::Disconnect => break, + Consumed::None => {} } } - debug!( - "Shutting down reader connection with {}", - reader - 
.peer_addr() - .map(|a| a.to_string()) - .unwrap_or_else(|_| "?".to_owned()) - ); - let _ = reader.shutdown(Shutdown::Both); + debug!("Shutting down reader connection with {}", peer_addr); + let _ = codec.stream().shutdown(Shutdown::Both); })?; let writer_thread = thread::Builder::new() diff --git a/p2p/src/handshake.rs b/p2p/src/handshake.rs index 65d18ac182..8cdd213ef9 100644 --- a/p2p/src/handshake.rs +++ b/p2p/src/handshake.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,9 +13,9 @@ // limitations under the License. use crate::conn::Tracker; -use crate::core::core::hash::Hash; -use crate::core::pow::Difficulty; -use crate::core::ser::ProtocolVersion; +use crate::grin_core::core::hash::Hash; +use crate::grin_core::pow::Difficulty; +use crate::grin_core::ser::ProtocolVersion; use crate::msg::{read_message, write_message, Hand, Msg, Shake, TorAddress, Type, USER_AGENT}; use crate::peer::Peer; use crate::types::{ @@ -118,8 +118,11 @@ impl Handshake { let nonce = self.next_nonce(); let peer_addr = peer_addr.unwrap_or(match conn.peer_addr() { Ok(addr) => PeerAddr::Ip(addr), - Err(_) => { - return Err(Error::ConnectionClose); + Err(e) => { + return Err(Error::ConnectionClose(format!( + "unable to get peer address, {}", + e + ))) } }); @@ -182,7 +185,10 @@ impl Handshake { // If denied then we want to close the connection // (without providing our peer with any details why). if Peer::is_denied(&self.config, peer_info.addr.clone()) { - return Err(Error::ConnectionClose); + return Err(Error::ConnectionClose(format!( + "{:?} is denied", + peer_info.addr + ))); } debug!( @@ -256,7 +262,9 @@ impl Handshake { // If denied then we want to close the connection // (without providing our peer with any details why). if Peer::is_denied(&self.config, peer_info.addr.clone()) { - return Err(Error::ConnectionClose); + return Err(Error::ConnectionClose(String::from( + "Peer denied because it is in config black list", + ))); } // send our reply with our info diff --git a/p2p/src/lib.rs b/p2p/src/lib.rs index e40bc115c2..77bd0b37d6 100644 --- a/p2p/src/lib.rs +++ b/p2p/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
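The conn.rs reader loop above funnels every decoded message through the slimmed-down MessageHandler trait: consume() now receives a fully decoded Message by value and returns a Consumed directive instead of writing to the stream itself. A minimal, hypothetical implementor (Protocol in protocol.rs is the real one):

// Hypothetical no-op handler showing the new contract.
struct NoopHandler;

impl MessageHandler for NoopHandler {
    fn consume(&self, message: Message) -> Result<Consumed, Error> {
        trace!("consuming {}", message);
        // The directive tells the reader loop what to do next:
        //   Consumed::Response(msg)       -> queue msg on the writer half
        //   Consumed::Attachment(meta, f) -> switch the codec into attachment
        //                                    mode and stream the body into f
        //   Consumed::Disconnect          -> drop the connection
        //   Consumed::None                -> nothing further to do
        Ok(Consumed::None)
    }
}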
@@ -27,7 +27,7 @@ extern crate bitflags; extern crate enum_primitive; #[macro_use] -extern crate grin_core as core; +extern crate grin_core; use grin_chain as chain; use grin_util as util; @@ -39,6 +39,7 @@ extern crate log; #[macro_use] extern crate lazy_static; +mod codec; mod conn; pub mod handshake; pub mod libp2p_connection; @@ -47,7 +48,7 @@ mod peer; mod peers; mod protocol; mod serv; -mod store; +pub mod store; pub mod types; pub use crate::conn::SEND_CHANNEL_CAP; diff --git a/p2p/src/libp2p_connection.rs b/p2p/src/libp2p_connection.rs index f8af7de548..3cb6c197b8 100644 --- a/p2p/src/libp2p_connection.rs +++ b/p2p/src/libp2p_connection.rs @@ -38,7 +38,7 @@ use libp2p::gossipsub::{ }; use libp2p::gossipsub::{Gossipsub, MessageAcceptance, TopicHash}; -use crate::core::global; +use crate::grin_core::global; use crate::types::Error; use crate::PeerAddr; use async_std::task; @@ -50,8 +50,8 @@ use grin_core::core::TxKernel; use grin_core::libtx::aggsig; use grin_util::secp::pedersen::Commitment; use grin_util::secp::rand::Rng; -use grin_util::secp::{ContextFlag, Message, Secp256k1, Signature}; -use grin_util::RwLock; +use grin_util::secp::{Message, Secp256k1, Signature}; +use grin_util::{static_secp_instance, RwLock}; use grin_util::{Mutex, OnionV3Address, OnionV3AddressError, ToHex}; use libp2p::core::network::NetworkInfo; use rand::seq::SliceRandom; @@ -592,12 +592,16 @@ pub async fn run_libp2p_node( let gossip = swarm.get_behaviour(); + let secp = static_secp_instance(); + let secp = secp.lock(); + let acceptance = match validate_integrity_message( &peer_id, &message.data, kernel_validation_fn.clone(), &mut requests_cash, fee_base, + &secp, ) { Ok((integrity_fee, sender_address)) => { if integrity_fee > 0 { @@ -800,6 +804,7 @@ pub fn validate_integrity_message( output_validation_fn: Arc Result, Error>>, requests_cash: &mut HashMap>, fee_base: u64, + secp: &Secp256k1, ) -> Result<(u64, String), Error> { let mut ser = SimplePopSerializer::new(message); if ser.version != get_message_version() { @@ -813,7 +818,7 @@ pub fn validate_integrity_message( // Let's check signature first. The kernel search might take time. Signature checking should be faster. let integrity_kernel_excess = Commitment::from_vec(ser.pop_vec()); - let integrity_pk = match integrity_kernel_excess.to_pubkey() { + let integrity_pk = match integrity_kernel_excess.to_pubkey(secp) { Ok(pk) => pk, Err(e) => { debug!( @@ -824,8 +829,6 @@ pub fn validate_integrity_message( } }; - let secp = Secp256k1::with_caps(ContextFlag::VerifyOnly); - // Checking if public key match the signature. 
let sender_address_pk = match DalekPublicKey::from_bytes(&ser.pop_vec()) { Ok(pk) => pk, @@ -851,7 +854,7 @@ pub fn validate_integrity_message( let sender_address = PeerId::onion_v3_from_pubkey(&sender_address_pk); - let signature = match Signature::from_compact(&ser.pop_vec()) { + let signature = match Signature::from_compact(&secp, &ser.pop_vec()) { Ok(s) => s, Err(e) => { debug!( @@ -863,7 +866,7 @@ pub fn validate_integrity_message( }; match aggsig::verify_completed_sig( - &secp, + secp, &signature, &integrity_pk, Some(&integrity_pk), @@ -890,7 +893,7 @@ pub fn validate_integrity_message( } }; - let integrity_fee = integrity_kernel.features.get_fee(); + let integrity_fee = integrity_kernel.features.get_fee_pessimistic(); if integrity_fee < fee_base * INTEGRITY_FEE_MIN_X { debug!( @@ -962,12 +965,13 @@ pub fn build_integrity_message( tor_pk: &DalekPublicKey, signature: &Signature, message_data: &[u8], + secp: &Secp256k1, ) -> Result, Error> { let mut ser = SimplePushSerializer::new(get_message_version()); ser.push_vec(&kernel_excess.0); ser.push_vec(tor_pk.as_bytes()); - ser.push_vec(&signature.serialize_compact()); + ser.push_vec(&signature.serialize_compact(secp)); ser.push_vec(message_data); Ok(ser.to_vec()) @@ -979,22 +983,30 @@ pub fn build_integrity_message( fn test_integrity() -> Result<(), Error> { use grin_core::core::KernelFeatures; use grin_util::from_hex; + use grin_util::secp::ContextFlag; // It is peer form wallet's test. We know commit and signature for it. let peer_id = PeerId::from_bytes( &from_hex("000100220020720661bf2f0d7c81c2980db83bb973be2816cf5a0da2da9aacd0ad47d534215c001c2f6f6e696f6e332f776861745f657665725f616464726573733a3737").unwrap() ).unwrap(); let peer_pk = peer_id.as_dalek_pubkey().unwrap(); let onion_address = PeerId::onion_v3_from_pubkey(&peer_pk); + let secp = Secp256k1::with_caps(ContextFlag::Full); + let integrity_kernel = Commitment::from_vec( from_hex("08a8f99853d65cee63c973a78a005f4646b777262440a8bfa090694a339a388865").unwrap(), ); - let integrity_signature = Signature::from_compact(&from_hex("102a84ec71494d69c1b4cca181b7715beea1ebd0822efb4d6440a0f2be75119b56270affac659214c27903347676c27063dc7f5f2f0c6a8441cab73d16aa7ebe").unwrap()).unwrap(); + let integrity_signature = Signature::from_compact(&secp, &from_hex("102a84ec71494d69c1b4cca181b7715beea1ebd0822efb4d6440a0f2be75119b56270affac659214c27903347676c27063dc7f5f2f0c6a8441cab73d16aa7ebe").unwrap()).unwrap(); let message: Vec = vec![1, 2, 3, 4, 3, 2, 1]; - let encoded_message = - build_integrity_message(&integrity_kernel, &peer_pk, &integrity_signature, &message) - .unwrap(); + let encoded_message = build_integrity_message( + &integrity_kernel, + &peer_pk, + &integrity_signature, + &message, + &secp, + ) + .unwrap(); // Validation use case let mut requests_cache: HashMap> = HashMap::new(); @@ -1010,7 +1022,9 @@ fn test_integrity() -> Result<(), Error> { valid_kernels.insert( integrity_kernel, TxKernel::with_features(KernelFeatures::Plain { - fee: paid_integrity_fee, + fee: paid_integrity_fee + .try_into() + .expect("Failed to convert the paid_integrity_fee"), }), ); let output_validation_fn = |commit: &Commitment| -> Result, Error> { @@ -1025,7 +1039,8 @@ fn test_integrity() -> Result<(), Error> { &encoded_message, empty_output_validation_fn.clone(), &mut requests_cache, - fee_base + fee_base, + &secp ) .unwrap() .0, @@ -1039,6 +1054,7 @@ fn test_integrity() -> Result<(), Error> { output_validation_fn.clone(), &mut requests_cache, fee_base, + &secp, ) .unwrap(); @@ -1055,7 +1071,8 @@ fn 
test_integrity() -> Result<(), Error> { &encoded_message, output_validation_fn.clone(), &mut requests_cache, - fee_base + fee_base, + &secp ) .unwrap() .0, @@ -1071,7 +1088,8 @@ fn test_integrity() -> Result<(), Error> { &encoded_message, output_validation_fn.clone(), &mut requests_cache, - fee_base + fee_base, + &secp ) .unwrap() .0, @@ -1087,7 +1105,8 @@ fn test_integrity() -> Result<(), Error> { &encoded_message, output_validation_fn.clone(), &mut requests_cache, - fee_base + fee_base, + &secp ) .unwrap() .0, @@ -1102,7 +1121,8 @@ fn test_integrity() -> Result<(), Error> { &encoded_message, output_validation_fn.clone(), &mut requests_cache, - fee_base + fee_base, + &secp ) .unwrap() .0, @@ -1117,7 +1137,8 @@ fn test_integrity() -> Result<(), Error> { &encoded_message, output_validation_fn.clone(), &mut requests_cache, - fee_base + fee_base, + &secp ) .unwrap() .0, diff --git a/p2p/src/msg.rs b/p2p/src/msg.rs index 8bf2b3e232..f453d3ddab 100644 --- a/p2p/src/msg.rs +++ b/p2p/src/msg.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,21 +14,31 @@ //! Message types that transit over the network and related serialization code. +use crate::chain::txhashset::BitmapSegment; use crate::conn::Tracker; -use crate::core::core::hash::Hash; -use crate::core::core::BlockHeader; -use crate::core::pow::Difficulty; -use crate::core::ser::{ - self, ProtocolVersion, Readable, Reader, StreamingReader, Writeable, Writer, +use crate::grin_core::core::hash::Hash; +use crate::grin_core::core::transaction::{OutputIdentifier, TxKernel}; +use crate::grin_core::core::{ + BlockHeader, Segment, SegmentIdentifier, Transaction, UntrustedBlock, UntrustedBlockHeader, + UntrustedCompactBlock, }; -use crate::core::{consensus, global}; +use crate::grin_core::pow::Difficulty; +use crate::grin_core::ser::{ + self, DeserializationMode, ProtocolVersion, Readable, Reader, StreamingReader, Writeable, + Writer, +}; +use crate::grin_core::{consensus, global}; use crate::types::{ - Capabilities, Error, PeerAddr, ReasonForBan, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS, + AttachmentMeta, AttachmentUpdate, Capabilities, Error, PeerAddr, ReasonForBan, + MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS, }; +use crate::util::secp::pedersen::RangeProof; +use bytes::Bytes; use num::FromPrimitive; use std::fs::File; use std::io::{Read, Write}; use std::sync::Arc; +use std::{fmt, thread, time::Duration}; /// Grin's user agent with current version pub const USER_AGENT: &str = concat!("MW/MWC ", env!("CARGO_PKG_VERSION")); @@ -67,12 +77,23 @@ enum_from_primitive! { GetTransaction = 19, TransactionKernel = 20, TorAddress = 23, + StartPibdSyncRequest = 24, + GetOutputBitmapSegment = 25, + OutputBitmapSegment = 26, + GetOutputSegment = 27, + OutputSegment = 28, + GetRangeProofSegment = 29, + RangeProofSegment = 30, + GetKernelSegment = 31, + KernelSegment = 32, + HasAnotherArchiveHeader = 33, + PibdSyncState = 34, } } /// Max theoretical size of a block filled with outputs. fn max_block_size() -> u64 { - (global::max_block_weight() / consensus::BLOCK_OUTPUT_WEIGHT * 708) as u64 + (global::max_block_weight() / consensus::OUTPUT_WEIGHT * 708) as u64 } // Max msg size when msg type is unknown. 
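One behavioral change in msg.rs deserves a callout before the hunks: write_message (below) now paces outbound messages so a peer stays under the abuse threshold. A standalone sketch of that throttle and its arithmetic, with a hypothetical Pacer in place of the shared sent-bytes Tracker:

use std::thread;
use std::time::{Duration, Instant};

// Standalone sketch of the pacing added to write_message below: 150ms minimum
// spacing caps a peer at 60_000 / 150 = 400 outbound messages per minute,
// safely under the 500/min rate that gets a peer banned as abusive.
struct Pacer {
    last_msg: Option<Instant>,
}

impl Pacer {
    fn pace(&mut self) {
        const MIN_INTERVAL_MS: u64 = 150;
        if let Some(last) = self.last_msg {
            let elapsed = last.elapsed().as_millis() as u64;
            let sleep_ms = MIN_INTERVAL_MS.saturating_sub(elapsed);
            if sleep_ms > 0 {
                thread::sleep(Duration::from_millis(sleep_ms));
            }
        }
        self.last_msg = Some(Instant::now());
    }
}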
@@ -99,12 +120,23 @@ fn max_msg_size(msg_type: Type) -> u64 { Type::CompactBlock => max_block_size() / 10, Type::StemTransaction => max_block_size(), Type::Transaction => max_block_size(), - Type::TxHashSetRequest => 40, + Type::TxHashSetRequest => 40, // 32+8=40 Type::TxHashSetArchive => 64, Type::BanReason => 64, Type::GetTransaction => 32, Type::TransactionKernel => 32, Type::TorAddress => 128, + Type::GetOutputBitmapSegment => 41, + Type::OutputBitmapSegment => 2 * max_block_size(), + Type::GetOutputSegment => 41, + Type::OutputSegment => 2 * max_block_size(), + Type::GetRangeProofSegment => 41, + Type::RangeProofSegment => 2 * max_block_size(), + Type::GetKernelSegment => 41, + Type::KernelSegment => 2 * max_block_size(), + Type::StartPibdSyncRequest => 40, // 32+8=40 + Type::HasAnotherArchiveHeader => 40, + Type::PibdSyncState => 72, // 32 + 8 + 32 = 72 } } @@ -155,7 +187,8 @@ pub fn read_header( ) -> Result { let mut head = vec![0u8; MsgHeader::LEN]; stream.read_exact(&mut head)?; - let header: MsgHeaderWrapper = ser::deserialize(&mut &head[..], version)?; + let header: MsgHeaderWrapper = + ser::deserialize(&mut &head[..], version, DeserializationMode::default())?; Ok(header) } @@ -180,7 +213,7 @@ pub fn read_body( ) -> Result { let mut body = vec![0u8; h.msg_len as usize]; stream.read_exact(&mut body)?; - ser::deserialize(&mut &body[..], version).map_err(From::from) + ser::deserialize(&mut &body[..], version, DeserializationMode::default()).map_err(From::from) } /// Read (an unknown) message from the provided stream and discard it. @@ -216,6 +249,17 @@ pub fn write_message( msg: &Msg, tracker: Arc, ) -> Result<(), Error> { + // Introduce a delay so messages are spaced at least 150ms apart. + // This gives a max msg rate of 60000/150 = 400 messages per minute. + // Exceeding 500 messages per minute will result in being banned as abusive. + if let Some(elapsed) = tracker.sent_bytes.read().elapsed_since_last_msg() { + let min_interval: u64 = 150; + let sleep_ms = min_interval.saturating_sub(elapsed); + if sleep_ms > 0 { + thread::sleep(Duration::from_millis(sleep_ms)) + } + } + let mut buf = ser::ser_vec(&msg.header, msg.version)?; buf.extend(&msg.body[..]); stream.write_all(&buf[..])?; @@ -519,6 +563,40 @@ impl Readable for PeerAddrs { } } +impl IntoIterator for PeerAddrs { + type Item = PeerAddr; + type IntoIter = std::vec::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.peers.into_iter() + } +} + +impl Default for PeerAddrs { + fn default() -> Self { + PeerAddrs { peers: vec![] } + } +} + +impl PeerAddrs { + pub fn as_slice(&self) -> &[PeerAddr] { + self.peers.as_slice() + } + + pub fn contains(&self, addr: &PeerAddr) -> bool { + self.peers.contains(addr) + } + + pub fn difference(&self, other: &[PeerAddr]) -> PeerAddrs { + let peers = self + .peers + .iter() + .filter(|x| !other.contains(x)) + .cloned() + .collect(); + PeerAddrs { peers } + } +} + /// We found some issue in the communication, sending an error back, usually /// followed by closing the connection. pub struct PeerError { @@ -691,16 +769,15 @@ impl Readable for BanReason { } } -/// Request to get an archive of the full txhashset store, required to sync -/// a new node. 
-pub struct TxHashSetRequest { +/// Request to get PIBD sync request +pub struct ArchiveHeaderData { /// Hash of the block for which the txhashset should be provided pub hash: Hash, /// Height of the corresponding block pub height: u64, } -impl Writeable for TxHashSetRequest { +impl Writeable for ArchiveHeaderData { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { self.hash.write(writer)?; writer.write_u64(self.height)?; @@ -708,15 +785,251 @@ impl Writeable for TxHashSetRequest { } } -impl Readable for TxHashSetRequest { - fn read(reader: &mut R) -> Result { - Ok(TxHashSetRequest { +impl Readable for ArchiveHeaderData { + fn read(reader: &mut R) -> Result { + Ok(ArchiveHeaderData { hash: Hash::read(reader)?, height: reader.read_u64()?, }) } } +pub struct PibdSyncState { + /// Hash of the block for which the txhashset should be provided + pub header_hash: Hash, + /// Height of the corresponding block + pub header_height: u64, + /// output bitmap root hash + pub output_bitmap_root: Hash, +} + +impl Writeable for PibdSyncState { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + self.header_hash.write(writer)?; + writer.write_u64(self.header_height)?; + self.output_bitmap_root.write(writer)?; + Ok(()) + } +} + +impl Readable for PibdSyncState { + fn read(reader: &mut R) -> Result { + Ok(PibdSyncState { + header_hash: Hash::read(reader)?, + header_height: reader.read_u64()?, + output_bitmap_root: Hash::read(reader)?, + }) + } +} + +/// Request to get a segment of a (P)MMR at a particular block. +pub struct SegmentRequest { + /// The hash of the block the MMR is associated with + pub block_hash: Hash, + /// The identifier of the requested segment + pub identifier: SegmentIdentifier, +} + +impl Readable for SegmentRequest { + fn read(reader: &mut R) -> Result { + let block_hash = Readable::read(reader)?; + let identifier = Readable::read(reader)?; + Ok(Self { + block_hash, + identifier, + }) + } +} + +impl Writeable for SegmentRequest { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + Writeable::write(&self.block_hash, writer)?; + Writeable::write(&self.identifier, writer) + } +} + +/// Response to a (P)MMR segment request. +pub struct SegmentResponse { + /// The hash of the block the MMR is associated with + pub block_hash: Hash, + /// The MMR segment + pub segment: Segment, +} + +impl Readable for SegmentResponse { + fn read(reader: &mut R) -> Result { + let block_hash = Readable::read(reader)?; + let segment = Readable::read(reader)?; + Ok(Self { + block_hash, + segment, + }) + } +} + +impl Writeable for SegmentResponse { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + Writeable::write(&self.block_hash, writer)?; + Writeable::write(&self.segment, writer) + } +} + +/// Response to an output PMMR segment request. +pub struct OutputSegmentResponse { + /// The segment response + pub response: SegmentResponse, +} + +impl Readable for OutputSegmentResponse { + fn read(reader: &mut R) -> Result { + let response = Readable::read(reader)?; + Ok(Self { response }) + } +} + +impl Writeable for OutputSegmentResponse { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + Writeable::write(&self.response, writer) + } +} + +/// Response to an output bitmap MMR segment request. 
+pub struct OutputBitmapSegmentResponse { + /// The hash of the block the MMR is associated with + pub block_hash: Hash, + /// The MMR segment + pub segment: BitmapSegment, +} + +impl Readable for OutputBitmapSegmentResponse { + fn read(reader: &mut R) -> Result { + let block_hash = Readable::read(reader)?; + let segment = Readable::read(reader)?; + Ok(Self { + block_hash, + segment, + }) + } +} + +impl Writeable for OutputBitmapSegmentResponse { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + Writeable::write(&self.block_hash, writer)?; + Writeable::write(&self.segment, writer) + } +} + +pub enum Message { + Unknown(u8), + Ping(Ping), + Pong(Pong), + BanReason(BanReason), + TransactionKernel(Hash), + GetTransaction(Hash), + Transaction(Transaction), + StemTransaction(Transaction), + GetBlock(Hash), + Block(UntrustedBlock), + GetCompactBlock(Hash), + CompactBlock(UntrustedCompactBlock), + GetHeaders(Locator), + Header(UntrustedBlockHeader), + Headers(HeadersData), + GetPeerAddrs(GetPeerAddrs), + PeerAddrs(PeerAddrs), + TxHashSetRequest(ArchiveHeaderData), + TxHashSetArchive(TxHashSetArchive), + Attachment(AttachmentUpdate, Option), + TorAddress(TorAddress), + StartPibdSyncRequest(ArchiveHeaderData), + PibdSyncState(PibdSyncState), + GetOutputBitmapSegment(SegmentRequest), + OutputBitmapSegment(OutputBitmapSegmentResponse), + GetOutputSegment(SegmentRequest), + OutputSegment(OutputSegmentResponse), + GetRangeProofSegment(SegmentRequest), + RangeProofSegment(SegmentResponse), + GetKernelSegment(SegmentRequest), + KernelSegment(SegmentResponse), + HasAnotherArchiveHeader(ArchiveHeaderData), +} + +/// We receive 512 headers from a peer. +/// But we process them in smaller batches of 32 headers. +/// HeadersData wraps the current batch and a count of the headers remaining after this batch. +pub struct HeadersData { + /// Batch of headers currently being processed. + pub headers: Vec, + /// Number of headers stil to be processed after this current batch. + /// 0 indicates this is the final batch from the larger set of headers received from the peer. 
+ pub remaining: u64, +} + +impl fmt::Display for Message { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Message::Unknown(_) => write!(f, "unknown"), + Message::Ping(_) => write!(f, "ping"), + Message::Pong(_) => write!(f, "pong"), + Message::BanReason(_) => write!(f, "ban reason"), + Message::TransactionKernel(_) => write!(f, "tx kernel"), + Message::GetTransaction(_) => write!(f, "get tx"), + Message::Transaction(_) => write!(f, "tx"), + Message::StemTransaction(_) => write!(f, "stem tx"), + Message::GetBlock(_) => write!(f, "get block"), + Message::Block(_) => write!(f, "block"), + Message::GetCompactBlock(_) => write!(f, "get compact block"), + Message::CompactBlock(_) => write!(f, "compact block"), + Message::GetHeaders(_) => write!(f, "get headers"), + Message::Header(_) => write!(f, "header"), + Message::Headers(_) => write!(f, "headers"), + Message::GetPeerAddrs(_) => write!(f, "get peer addrs"), + Message::PeerAddrs(_) => write!(f, "peer addrs"), + Message::TxHashSetRequest(_) => write!(f, "tx hash set request"), + Message::TxHashSetArchive(_) => write!(f, "tx hash set"), + Message::Attachment(_, _) => write!(f, "attachment"), + Message::TorAddress(_) => write!(f, "tor address"), + Message::GetOutputBitmapSegment(_) => write!(f, "get output bitmap segment"), + Message::OutputBitmapSegment(_) => write!(f, "output bitmap segment"), + Message::GetOutputSegment(_) => write!(f, "get output segment"), + Message::OutputSegment(_) => write!(f, "output segment"), + Message::GetRangeProofSegment(_) => write!(f, "get range proof segment"), + Message::RangeProofSegment(_) => write!(f, "range proof segment"), + Message::GetKernelSegment(_) => write!(f, "get kernel segment"), + Message::KernelSegment(_) => write!(f, "kernel segment"), + Message::PibdSyncState(_) => write!(f, "PIBD sync state"), + Message::StartPibdSyncRequest(_) => write!(f, "start PIBD sync"), + Message::HasAnotherArchiveHeader(_) => { + write!(f, "PIBD error, has another archive header") + } + } + } +} + +impl fmt::Debug for Message { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Consume({})", self) + } +} + +pub enum Consumed { + Response(Msg), + Attachment(Arc, File), + None, + Disconnect, +} + +impl fmt::Debug for Consumed { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Consumed::Response(msg) => write!(f, "Consumed::Response({:?})", msg.header.msg_type), + Consumed::Attachment(meta, _) => write!(f, "Consumed::Attachment({:?})", meta.size), + Consumed::None => write!(f, "Consumed::None"), + Consumed::Disconnect => write!(f, "Consumed::Disconnect"), + } + } +} + /// Response to a txhashset archive request, must include a zip stream of the /// archive after the message body. pub struct TxHashSetArchive { diff --git a/p2p/src/peer.rs b/p2p/src/peer.rs index c8397f3c69..308a956f27 100644 --- a/p2p/src/peer.rs +++ b/p2p/src/peer.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
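The new PIBD request/response types in msg.rs above all follow one rule: Writeable and Readable must be exact mirrors, since the remote peer rebuilds each struct from raw bytes alone. A hedged round-trip check of that property, reusing the ser helpers this diff already calls (ProtocolVersion::local() is assumed, as in upstream grin):

use grin_core::ser::{self, DeserializationMode, ProtocolVersion};

// Serialize a SegmentRequest and read it back: the two must be identical or
// the wire format is broken for every peer on the network.
fn round_trip(req: &SegmentRequest) -> Result<SegmentRequest, ser::Error> {
    let version = ProtocolVersion::local();
    let bytes = ser::ser_vec(req, version)?;
    ser::deserialize(&mut &bytes[..], version, DeserializationMode::default())
}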
@@ -24,19 +24,25 @@ use std::sync::Arc; use lru_cache::LruCache; use crate::chain; +use crate::chain::txhashset::BitmapChunk; use crate::conn; -use crate::core::core::hash::{Hash, Hashed}; -use crate::core::pow::Difficulty; -use crate::core::ser::Writeable; -use crate::core::{core, global}; +use crate::grin_core::core::hash::{Hash, Hashed}; +use crate::grin_core::core::{OutputIdentifier, Segment, SegmentIdentifier, TxKernel}; +use crate::grin_core::pow::Difficulty; +use crate::grin_core::ser::Writeable; +use crate::grin_core::{core, global}; use crate::handshake::Handshake; -use crate::msg::{self, BanReason, GetPeerAddrs, Locator, Msg, Ping, TxHashSetRequest, Type}; +use crate::msg::{ + self, ArchiveHeaderData, BanReason, GetPeerAddrs, Locator, Msg, Ping, SegmentRequest, Type, +}; use crate::protocol::Protocol; use crate::types::{ Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerAddr, PeerInfo, ReasonForBan, TxHashSetRead, }; +use crate::util::secp::pedersen::RangeProof; use chrono::prelude::{DateTime, Utc}; +use grin_chain::txhashset::Segmenter; use std::time::Instant; const MAX_TRACK_SIZE: usize = 30; @@ -51,6 +57,60 @@ enum State { Banned, } +pub struct PeerPibdStatus { + /// Hash of the block for which the txhashset should be provided + pub header_hash: Hash, + /// Height of the corresponding block + pub header_height: u64, + /// Output bitmap root hash + pub output_bitmap_root: Option<Hash>, + // History of committed output bitmap root + header hashes. Needed to ban the peer in case of misbehaviour + pub output_bitmap_root_header_history: Vec<Hash>, + /// Time when a PIBD start request was last sent (timestamp in seconds) + pub initiate_pibd_request_time: i64, + /// Number of requests sent since the last successful response was received + pub no_response_requests: u32, + /// Time when the first unanswered request was sent + pub no_response_time: Option<i64>, +} + +impl PeerPibdStatus { + pub fn default() -> PeerPibdStatus { + PeerPibdStatus { + header_hash: Hash::default(), + header_height: 0, + output_bitmap_root: None, + output_bitmap_root_header_history: Vec::new(), + initiate_pibd_request_time: 0, + no_response_requests: 0, + no_response_time: None, + } + } + + pub fn update_pibd_status( + &mut self, + header_hash: Hash, + header_height: u64, + output_bitmap_root: Option<Hash>, + ) { + self.header_hash = header_hash; + self.header_height = header_height; + + match output_bitmap_root { + Some(hash) => { + let hist_hash = (hash, header_hash).hash(); + if !self.output_bitmap_root_header_history.contains(&hist_hash) { + self.output_bitmap_root_header_history.push(hist_hash); + } + self.output_bitmap_root = Some(hash); + } + None => { + self.output_bitmap_root = None; + } + } + } +} + pub struct Peer { pub info: PeerInfo, state: Arc<RwLock<State>>, @@ -64,6 +124,8 @@ pub struct Peer { stop_handle: Mutex<conn::StopHandle>, // Whether or not we requested a txhashset from this peer state_sync_requested: Arc<AtomicBool>, + // PIBD available data status + pub pibd_status: Arc<Mutex<PeerPibdStatus>>, } impl fmt::Debug for Peer { @@ -78,18 +140,18 @@ impl Peer { fn new( info: PeerInfo, conn: TcpStream, adapter: Arc<dyn ChainAdapter>, - header_cache_size: u64, server: Server, ) -> std::io::Result<Peer> { let state = Arc::new(RwLock::new(State::Connected)); let state_sync_requested = Arc::new(AtomicBool::new(false)); let tracking_adapter = TrackingAdapter::new(adapter); + let pibd_status = Arc::new(Mutex::new(PeerPibdStatus::default())); let handler = Protocol::new( Arc::new(tracking_adapter.clone()), info.clone(), state_sync_requested.clone(), - header_cache_size, server, + pibd_status.clone(), ); let
tracker = Arc::new(conn::Tracker::new()); let (sendh, stoph) = conn::listen(conn, info.version, tracker.clone(), handler)?; @@ -103,6 +165,7 @@ impl Peer { send_handle, stop_handle, state_sync_requested, + pibd_status, }) } @@ -112,13 +175,12 @@ impl Peer { total_difficulty: Difficulty, hs: &Handshake, adapter: Arc, - header_cache_size: u64, server: Server, ) -> Result { debug!("accept: handshaking from {:?}", conn.peer_addr()); let info = hs.accept(capab, total_difficulty, &mut conn); match info { - Ok(info) => Ok(Peer::new(info, conn, adapter, header_cache_size, server)?), + Ok(info) => Ok(Peer::new(info, conn, adapter, server)?), Err(e) => { debug!( "accept: handshaking from {:?} failed with error: {:?}", @@ -140,7 +202,6 @@ impl Peer { self_addr: PeerAddr, hs: &Handshake, adapter: Arc, - header_cache_size: u64, peer_addr: Option, server: Server, ) -> Result { @@ -158,7 +219,7 @@ impl Peer { hs.initiate(capab, total_difficulty, self_addr, &mut conn, None) }; match info { - Ok(info) => Ok(Peer::new(info, conn, adapter, header_cache_size, server)?), + Ok(info) => Ok(Peer::new(info, conn, adapter, server)?), Err(e) => { if peer_addr.is_some() { debug!( @@ -236,27 +297,13 @@ impl Peer { /// Whether the peer is considered abusive, mostly for spammy nodes pub fn is_abusive(&self) -> bool { - let rec = self.tracker.received_bytes.read(); - let sent = self.tracker.sent_bytes.read(); - rec.count_per_min() > MAX_PEER_MSG_PER_MIN || sent.count_per_min() > MAX_PEER_MSG_PER_MIN - } - - /// Number of bytes sent to the peer - pub fn last_min_sent_bytes(&self) -> Option { - let sent_bytes = self.tracker.sent_bytes.read(); - Some(sent_bytes.bytes_per_min()) - } - - /// Number of bytes received from the peer - pub fn last_min_received_bytes(&self) -> Option { - let received_bytes = self.tracker.received_bytes.read(); - Some(received_bytes.bytes_per_min()) + let rec = self.tracker().received_bytes.read(); + rec.count_per_min() > MAX_PEER_MSG_PER_MIN } - pub fn last_min_message_counts(&self) -> Option<(u64, u64)> { - let received_bytes = self.tracker.received_bytes.read(); - let sent_bytes = self.tracker.sent_bytes.read(); - Some((sent_bytes.count_per_min(), received_bytes.count_per_min())) + /// Tracker tracks sent/received bytes and message counts per minute. 
+ pub fn tracker(&self) -> &conn::Tracker { + &self.tracker } /// Set this peer status to banned @@ -411,11 +458,119 @@ impl Peer { ); self.state_sync_requested.store(true, Ordering::Relaxed); self.send( - &TxHashSetRequest { hash, height }, + &ArchiveHeaderData { hash, height }, msg::Type::TxHashSetRequest, ) } + pub fn send_start_pibd_sync_request(&self, height: u64, hash: Hash) -> Result<(), Error> { + info!( + "Asking peer {} for pibd sync at {} {}.", + self.info.addr, height, hash + ); + self.report_pibd_request(); + self.send( + &ArchiveHeaderData { hash, height }, + msg::Type::StartPibdSyncRequest, + ) + } + + fn report_pibd_request(&self) { + let mut pibd_status = self.pibd_status.lock(); + pibd_status.no_response_requests += 1; + if pibd_status.no_response_time.is_none() { + pibd_status.no_response_time = Some(Utc::now().timestamp()); + } + } + + pub fn send_bitmap_segment_request( + &self, + h: Hash, + identifier: SegmentIdentifier, + ) -> Result<(), Error> { + if log::log_enabled!(log::Level::Debug) { + let pibd_status = self.pibd_status.lock(); + debug!( + "Requesting peer {} for outputs bitmap, hash {}, id {}, output_bitmap_root: {:?}, height {}", + self.info.addr, h, identifier, pibd_status.output_bitmap_root, pibd_status.header_height + ); + } + self.report_pibd_request(); + self.send( + &SegmentRequest { + block_hash: h, + identifier, + }, + msg::Type::GetOutputBitmapSegment, + ) + } + + pub fn send_output_segment_request( + &self, + h: Hash, + identifier: SegmentIdentifier, + ) -> Result<(), Error> { + if log::log_enabled!(log::Level::Debug) { + let pibd_status = self.pibd_status.lock(); + debug!( + "Requesting peer {} for outputs, hash {}, id {}, output_bitmap_root: {:?}, height {}", + self.info.addr, h, identifier, pibd_status.output_bitmap_root, pibd_status.header_height + ); + } + self.report_pibd_request(); + self.send( + &SegmentRequest { + block_hash: h, + identifier, + }, + msg::Type::GetOutputSegment, + ) + } + + pub fn send_rangeproof_segment_request( + &self, + h: Hash, + identifier: SegmentIdentifier, + ) -> Result<(), Error> { + if log::log_enabled!(log::Level::Debug) { + let pibd_status = self.pibd_status.lock(); + debug!( + "Requesting peer {} for rangeproofs, hash {}, id {}, output_bitmap_root: {:?}, height {}", + self.info.addr, h, identifier, pibd_status.output_bitmap_root, pibd_status.header_height + ); + } + self.report_pibd_request(); + self.send( + &SegmentRequest { + block_hash: h, + identifier, + }, + msg::Type::GetRangeProofSegment, + ) + } + + pub fn send_kernel_segment_request( + &self, + h: Hash, + identifier: SegmentIdentifier, + ) -> Result<(), Error> { + if log::log_enabled!(log::Level::Debug) { + let pibd_status = self.pibd_status.lock(); + debug!( + "Requesting peer {} for kernels, hash {}, id {}, output_bitmap_root: {:?}, height {}", + self.info.addr, h, identifier, pibd_status.output_bitmap_root, pibd_status.header_height + ); + } + self.report_pibd_request(); + self.send( + &SegmentRequest { + block_hash: h, + identifier, + }, + msg::Type::GetKernelSegment, + ) + } + /// Stops the peer pub fn stop(&self) { debug!("Stopping peer {:?}", self.info.addr); @@ -433,6 +588,26 @@ impl Peer { None => error!("can't get stop lock for peer"), } } + + /// check if this peer ever commited for specific pibd hash + pub fn commited_to_pibd_bitmap_output_root( + &self, + output_bitmap_root_header_hash: &Hash, + ) -> bool { + let status = self.pibd_status.lock(); + status + .output_bitmap_root_header_history + .contains(&output_bitmap_root_header_hash) + } 
+ + /// + pub fn get_pibd_no_response_state(&self) -> Option<(u32, i64)> { + let status = self.pibd_status.lock(); + match status.no_response_time { + None => None, + Some(time) => Some((status.no_response_requests, time)), + } + } } /// Adapter implementation that forwards everything to an underlying adapter @@ -565,7 +740,6 @@ impl ChainAdapter for TrackingAdapter { &self, bh: &[core::BlockHeader], peer_info: &PeerInfo, - header_sync_cache_size: u64, ) -> Result { trace!( "peer = {:?}, set header sync = false (in headers)", @@ -586,17 +760,7 @@ impl ChainAdapter for TrackingAdapter { peer_info.header_sync_requested.store(0, Ordering::Relaxed); } trace!("header sync for {} is {}", peer_info.addr, val); - self.adapter - .headers_received(bh, peer_info, header_sync_cache_size) - } - - // note: not needed because adapter is called from headers_received and header_recevied - fn process_add_headers_sync( - &self, - _: &[core::BlockHeader], - _: u64, - ) -> Result { - unimplemented!() + self.adapter.headers_received(bh, peer_info) } fn locate_headers(&self, locator: &[Hash]) -> Result, chain::Error> { @@ -645,6 +809,83 @@ impl ChainAdapter for TrackingAdapter { fn get_tmpfile_pathname(&self, tmpfile_name: String) -> PathBuf { self.adapter.get_tmpfile_pathname(tmpfile_name) } + + /// For MWC handshake we need to have a segmenter ready with output bitmap ready and commited. + fn prepare_segmenter(&self) -> Result { + self.adapter.prepare_segmenter() + } + + fn get_kernel_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error> { + self.adapter.get_kernel_segment(hash, id) + } + + fn get_bitmap_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error> { + self.adapter.get_bitmap_segment(hash, id) + } + + fn get_output_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error> { + self.adapter.get_output_segment(hash, id) + } + + fn get_rangeproof_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error> { + self.adapter.get_rangeproof_segment(hash, id) + } + + fn receive_bitmap_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result { + self.adapter + .receive_bitmap_segment(block_hash, bitmap_root_hash, segment) + } + + fn receive_output_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result { + self.adapter + .receive_output_segment(block_hash, bitmap_root_hash, segment) + } + + fn receive_rangeproof_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result { + self.adapter + .receive_rangeproof_segment(block_hash, bitmap_root_hash, segment) + } + + fn receive_kernel_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result { + self.adapter + .receive_kernel_segment(block_hash, bitmap_root_hash, segment) + } } impl NetAdapter for TrackingAdapter { diff --git a/p2p/src/peers.rs b/p2p/src/peers.rs index fe0b5ca077..992713ce6f 100644 --- a/p2p/src/peers.rs +++ b/p2p/src/peers.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
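The `(no_response_requests, no_response_time)` pair exposed by `get_pibd_no_response_state()` leaves the responsiveness policy to the caller. One possible policy, sketched with illustrative thresholds (the 3-request / 60-second values are assumptions, not taken from this change):

```rust
/// Decide whether a PIBD peer looks unresponsive.
/// `state` mirrors get_pibd_no_response_state(): None when every request was
/// answered, otherwise (unanswered request count, unix time of the first one).
fn looks_unresponsive(state: Option<(u32, i64)>, now: i64) -> bool {
    match state {
        None => false,
        Some((unanswered, since)) => unanswered >= 3 && now - since >= 60,
    }
}

fn main() {
    assert!(!looks_unresponsive(None, 1_000));
    assert!(!looks_unresponsive(Some((1, 990)), 1_000)); // too few to judge
    assert!(looks_unresponsive(Some((5, 900)), 1_000));
}
```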
@@ -18,22 +18,26 @@ use std::fs::File; use std::path::PathBuf; use std::sync::Arc; -use rand::seq::SliceRandom; -use rand::thread_rng; +use rand::prelude::*; use crate::chain; -use crate::core::core; -use crate::core::core::hash::{Hash, Hashed}; -use crate::core::global; -use crate::core::pow::Difficulty; +use crate::chain::txhashset::BitmapChunk; +use crate::grin_core::core; +use crate::grin_core::core::hash::{Hash, Hashed}; +use crate::grin_core::core::{OutputIdentifier, Segment, SegmentIdentifier, TxKernel}; +use crate::grin_core::global; +use crate::grin_core::pow::Difficulty; +use crate::msg::PeerAddrs; use crate::peer::Peer; use crate::store::{PeerData, PeerStore, State}; use crate::types::{ Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerAddr, PeerInfo, ReasonForBan, TxHashSetRead, MAX_PEER_ADDRS, }; +use crate::util::secp::pedersen::RangeProof; use chrono::prelude::*; use chrono::Duration; +use grin_chain::txhashset::Segmenter; use grin_util::StopState; const LOCK_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(2); @@ -65,27 +69,28 @@ impl Peers { /// Adds the peer to our internal peer mapping. Note that the peer is still /// returned so the server can run it. pub fn add_connected(&self, peer: Arc) -> Result<(), Error> { - let mut peers = self.peers.try_write_for(LOCK_TIMEOUT).ok_or_else(|| { - error!("add_connected: failed to get peers lock"); - Error::Timeout - })?; - - if self.is_banned(peer.info.addr.clone()) { - return Err(Error::Banned); + let peer_data: PeerData; + { + // Scope for peers vector lock - dont hold the peers lock while adding to lmdb + let mut peers = self.peers.try_write_for(LOCK_TIMEOUT).ok_or_else(|| { + error!("add_connected: failed to get peers lock"); + Error::Timeout + })?; + peer_data = PeerData { + addr: peer.info.addr.clone(), + capabilities: peer.info.capabilities, + user_agent: peer.info.user_agent.clone(), + flags: State::Healthy, + last_banned: 0, + ban_reason: ReasonForBan::None, + last_connected: Utc::now().timestamp(), + }; + debug!("Adding newly connected peer {}.", peer_data.addr); + peers.insert(peer_data.addr.clone(), peer); + } + if let Err(e) = self.save_peer(&peer_data) { + error!("Could not save connected peer address: {:?}", e); } - let peer_data = PeerData { - addr: peer.info.addr.clone(), - capabilities: peer.info.capabilities, - user_agent: peer.info.user_agent.clone(), - flags: State::Healthy, - last_banned: 0, - ban_reason: ReasonForBan::None, - last_connected: Utc::now().timestamp(), - }; - debug!("Saving newly connected peer {}.", peer_data.addr); - self.save_peer(&peer_data)?; - peers.insert(peer_data.addr, peer); - Ok(()) } @@ -117,149 +122,28 @@ impl Peers { Ok(peers.contains_key(&addr)) } - /// Get vec of peers we are currently connected to. - pub fn connected_peers(&self) -> Vec> { + /// Iterator over our current peers. + /// This allows us to hide try_read_for() behind a cleaner interface. + /// PeersIter lets us chain various adaptors for convenience. 
+ pub fn iter(&self) -> PeersIter>> { let peers = match self.peers.try_read_for(LOCK_TIMEOUT) { - Some(peers) => peers, + Some(peers) => peers.values().cloned().collect(), None => { if !self.stop_state.is_stopped() { // When stopped, peers access is locked by stopped thread error!("connected_peers: failed to get peers lock"); } - return vec![]; + vec![] } }; - let mut res = peers - .values() - .filter(|p| p.is_connected()) - .cloned() - .collect::>(); - res.shuffle(&mut thread_rng()); - res - } - - /// Get vec of peers we currently have an outgoing connection with. - pub fn outgoing_connected_peers(&self) -> Vec> { - self.connected_peers() - .into_iter() - .filter(|x| x.info.is_outbound()) - .collect() - } - - /// Get vec of peers we currently have an incoming connection with. - pub fn incoming_connected_peers(&self) -> Vec> { - self.connected_peers() - .into_iter() - .filter(|x| x.info.is_inbound()) - .collect() + PeersIter { + iter: peers.into_iter(), + } } /// Get a peer we're connected to by address. pub fn get_connected_peer(&self, addr: PeerAddr) -> Option> { - if self.stop_state.is_stopped() { - return None; - } - - let peers = match self.peers.try_read_for(LOCK_TIMEOUT) { - Some(peers) => peers, - None => { - if !self.stop_state.is_stopped() { - // When stopped, peers access is locked by stopped thread - error!("get_connected_peer: failed to get peers lock"); - } - return None; - } - }; - peers.get(&addr).cloned() - } - - /// Number of peers currently connected to. - pub fn peer_count(&self) -> u32 { - self.connected_peers().len() as u32 - } - - /// Number of outbound peers currently connected to. - pub fn peer_outbound_count(&self) -> u32 { - self.outgoing_connected_peers().len() as u32 - } - - /// Number of inbound peers currently connected to. - pub fn peer_inbound_count(&self) -> u32 { - self.incoming_connected_peers().len() as u32 - } - - // Return vec of connected peers that currently advertise more work - // (total_difficulty) than we do. - pub fn more_work_peers(&self) -> Result>, chain::Error> { - let peers = self.connected_peers(); - if peers.is_empty() { - return Ok(vec![]); - } - - let total_difficulty = self.total_difficulty()?; - - let mut max_peers = peers - .into_iter() - .filter(|x| x.info.total_difficulty() > total_difficulty) - .collect::>(); - - max_peers.shuffle(&mut thread_rng()); - Ok(max_peers) - } - - // Return number of connected peers that currently advertise more/same work - // (total_difficulty) than/as we do. - pub fn more_or_same_work_peers(&self) -> Result { - let peers = self.connected_peers(); - if peers.is_empty() { - return Ok(0); - } - - let total_difficulty = self.total_difficulty()?; - - Ok(peers - .iter() - .filter(|x| x.info.total_difficulty() >= total_difficulty) - .count()) - } - - /// Returns single random peer with more work than us. - pub fn more_work_peer(&self) -> Option> { - match self.more_work_peers() { - Ok(mut peers) => peers.pop(), - Err(e) => { - error!("failed to get more work peers: {:?}", e); - None - } - } - } - - /// Return vec of connected peers that currently have the most worked - /// branch, showing the highest total difficulty. 
- pub fn most_work_peers(&self) -> Vec> { - let peers = self.connected_peers(); - if peers.is_empty() { - return vec![]; - } - - let max_total_difficulty = match peers.iter().map(|x| x.info.total_difficulty()).max() { - Some(v) => v, - None => return vec![], - }; - - let mut max_peers = peers - .into_iter() - .filter(|x| x.info.total_difficulty() == max_total_difficulty) - .collect::>(); - - max_peers.shuffle(&mut thread_rng()); - max_peers - } - - /// Returns single random peer with the most worked branch, showing the - /// highest total difficulty. - pub fn most_work_peer(&self) -> Option> { - self.most_work_peers().pop() + self.iter().connected().by_addr(addr) } pub fn is_banned(&self, peer_addr: PeerAddr) -> bool { @@ -270,8 +154,10 @@ impl Peers { } /// Ban a peer, disconnecting it if we're currently connected pub fn ban_peer(&self, peer_addr: PeerAddr, ban_reason: ReasonForBan) -> Result<(), Error> { + // Update the peer in peers db self.update_state(peer_addr.clone(), State::Banned)?; + // Update the peer in the peers Vec match self.get_connected_peer(peer_addr.clone()) { Some(peer) => { info!("Banning peer {}, ban_reason {:?}", peer_addr, ban_reason); @@ -308,7 +194,7 @@ impl Peers { { let mut count = 0; - for p in self.connected_peers().iter() { + for p in self.iter().connected() { match inner(&p) { Ok(true) => count += 1, Ok(false) => (), @@ -375,7 +261,7 @@ impl Peers { /// Ping all our connected peers. Always automatically expects a pong back /// or disconnects. This acts as a liveness test. pub fn check_all(&self, total_difficulty: Difficulty, height: u64) { - for p in self.connected_peers().iter() { + for p in self.iter().connected() { if let Err(e) = p.send_ping(total_difficulty, height) { debug!("Error pinging peer {:?}: {:?}", &p.info.addr, e); let mut peers = match self.peers.try_write_for(LOCK_TIMEOUT) { @@ -391,15 +277,16 @@ impl Peers { } } - /// All peer information we have in storage - pub fn all_peers(&self) -> Vec { - match self.store.all_peers() { - Ok(peers) => peers, - Err(e) => { - error!("all_peers failed: {:?}", e); - vec![] - } - } + /// Iterator over all peers we know about (stored in our db). + pub fn peer_data_iter(&self) -> Result, Error> { + self.store.peers_iter().map_err(From::from) + } + + /// Convenience for reading all peer data from the db. 
+ pub fn all_peer_data(&self) -> Vec { + self.peer_data_iter() + .map(|peers| peers.collect()) + .unwrap_or(vec![]) } /// Find peers in store (not necessarily connected) and return their data @@ -428,6 +315,11 @@ impl Peers { self.store.save_peer(p).map_err(From::from) } + /// Saves updated information about multiple peers in batch + pub fn save_peers(&self, p: Vec) -> Result<(), Error> { + self.store.save_peers(p).map_err(From::from) + } + /// Updates the state of a peer in store pub fn update_state(&self, peer_addr: PeerAddr, new_state: State) -> Result<(), Error> { self.store @@ -442,20 +334,16 @@ impl Peers { &self, max_inbound_count: usize, max_outbound_count: usize, - preferred_peers: &[PeerAddr], + config: P2PConfig, ) { + let preferred_peers = config.peers_preferred.unwrap_or(PeerAddrs::default()); + let mut rm = vec![]; // build a list of peers to be cleaned up { - let peers = match self.peers.try_read_for(LOCK_TIMEOUT) { - Some(peers) => peers, - None => { - error!("clean_peers: can't get peers lock"); - return; - } - }; - for peer in peers.values() { + for peer in self.iter() { + let ref peer: &Peer = peer.as_ref(); if peer.is_banned() { debug!("clean_peers {:?}, peer banned", peer.info.addr); rm.push(peer.info.addr.clone()); @@ -463,12 +351,12 @@ impl Peers { debug!("clean_peers {:?}, not connected", peer.info.addr); rm.push(peer.info.addr.clone()); } else if peer.is_abusive() { - if let Some(counts) = peer.last_min_message_counts() { - debug!( - "clean_peers {:?}, abusive ({} sent, {} recv)", - peer.info.addr, counts.0, counts.1, - ); - } + let received = peer.tracker().received_bytes.read().count_per_min(); + let sent = peer.tracker().sent_bytes.read().count_per_min(); + debug!( + "clean_peers {:?}, abusive ({} sent, {} recv)", + peer.info.addr, sent, received, + ); let _ = self.update_state(peer.info.addr.clone(), State::Banned); rm.push(peer.info.addr.clone()); } else { @@ -487,27 +375,34 @@ impl Peers { } } + // closure to build an iterator of our outbound peers + let outbound_peers = || self.iter().outbound().connected().into_iter(); + // check here to make sure we don't have too many outgoing connections + // Preferred peers are treated preferentially here. + // Also choose outbound peers with lowest total difficulty to drop.
+ let excess_outgoing_count = outbound_peers().count().saturating_sub(max_outbound_count); if excess_outgoing_count > 0 { - let mut addrs: Vec<_> = self - .outgoing_connected_peers() - .iter() - .filter(|x| !preferred_peers.contains(&x.info.addr)) + let mut peer_infos: Vec<_> = outbound_peers() + .map(|x| x.info.clone()) + .filter(|x| !preferred_peers.contains(&x.addr)) + .collect(); + peer_infos.sort_unstable_by_key(|x| x.total_difficulty()); + let mut addrs = peer_infos + .into_iter() + .map(|x| x.addr) .take(excess_outgoing_count) - .map(|x| x.info.addr.clone()) .collect(); rm.append(&mut addrs); } + // closure to build an iterator of our inbound peers + let inbound_peers = || self.iter().inbound().connected().into_iter(); + // check here to make sure we don't have too many incoming connections - let excess_incoming_count = - (self.peer_inbound_count() as usize).saturating_sub(max_inbound_count); + let excess_incoming_count = inbound_peers().count().saturating_sub(max_inbound_count); if excess_incoming_count > 0 { - let mut addrs: Vec<_> = self - .incoming_connected_peers() - .iter() + let mut addrs: Vec<_> = inbound_peers() .filter(|x| !preferred_peers.contains(&x.info.addr)) .take(excess_incoming_count) .map(|x| x.info.addr.clone()) @@ -543,7 +438,8 @@ impl Peers { /// We have enough outbound connected peers pub fn enough_outbound_peers(&self) -> bool { - self.peer_outbound_count() >= self.config.peer_min_preferred_outbound_count() + self.iter().outbound().connected().count() + >= self.config.peer_min_preferred_outbound_count() as usize } /// Removes those peers that seem to have expired @@ -552,7 +448,7 @@ impl Peers { // Delete defunct peers from storage let _ = self.store.delete_peers(|peer| { - let diff = now - Utc.timestamp(peer.last_connected, 0); + let diff = now - Utc.timestamp_opt(peer.last_connected, 0).unwrap(); let should_remove = peer.flags == State::Defunct && diff > Duration::seconds(global::PEER_EXPIRATION_REMOVE_TIME); @@ -617,7 +513,7 @@ impl ChainAdapter for Peers { peer_info.addr.clone(), ); self.ban_peer(peer_info.addr.clone(), ReasonForBan::BadBlock) - .map_err(|e| chain::ErrorKind::Other(format!("ban peer error {}", e)))?; + .map_err(|e| chain::Error::Other(format!("ban peer error {}", e)))?; Ok(false) } else { Ok(true) @@ -639,7 +535,7 @@ impl ChainAdapter for Peers { peer_info.addr.clone() ); self.ban_peer(peer_info.addr.clone(), ReasonForBan::BadCompactBlock) - .map_err(|e| chain::ErrorKind::Other(format!("ban peer error {}", e)))?; + .map_err(|e| chain::Error::Other(format!("ban peer error {}", e)))?; Ok(false) } else { Ok(true) @@ -655,7 +551,7 @@ impl ChainAdapter for Peers { // if the peer sent us a block header that's intrinsically bad // they are either mistaken or malevolent, both of which require a ban self.ban_peer(peer_info.addr.clone(), ReasonForBan::BadBlockHeader) - .map_err(|e| chain::ErrorKind::Other(format!("ban peer error {}", e)))?; + .map_err(|e| chain::Error::Other(format!("ban peer error {}", e)))?; Ok(false) } else { Ok(true) @@ -666,31 +562,18 @@ impl ChainAdapter for Peers { &self, headers: &[core::BlockHeader], peer_info: &PeerInfo, - header_sync_cache_size: u64, ) -> Result { - if !self - .adapter - .headers_received(headers, peer_info, header_sync_cache_size)? - { + if !self.adapter.headers_received(headers, peer_info)? 
{ // if the peer sent us a block header that's intrinsically bad // they are either mistaken or malevolent, both of which require a ban self.ban_peer(peer_info.addr.clone(), ReasonForBan::BadBlockHeader) - .map_err(|e| chain::ErrorKind::Other(format!("ban peer error {}", e)))?; + .map_err(|e| chain::Error::Other(format!("ban peer error: {}", e)))?; Ok(false) } else { Ok(true) } } - // note not needed to implement because adapter is called by headers_received and header_received - fn process_add_headers_sync( - &self, - _: &[core::BlockHeader], - _: u64, - ) -> Result { - unimplemented!() - } - fn locate_headers(&self, hs: &[Hash]) -> Result, chain::Error> { self.adapter.locate_headers(hs) } @@ -723,7 +606,7 @@ impl ChainAdapter for Peers { peer_info.addr.clone() ); self.ban_peer(peer_info.addr.clone(), ReasonForBan::BadTxHashSet) - .map_err(|e| chain::ErrorKind::Other(format!("ban peer error {}", e)))?; + .map_err(|e| chain::Error::Other(format!("ban peer error {}", e)))?; Ok(true) } else { Ok(false) @@ -747,6 +630,83 @@ impl ChainAdapter for Peers { fn get_tmpfile_pathname(&self, tmpfile_name: String) -> PathBuf { self.adapter.get_tmpfile_pathname(tmpfile_name) } + + /// For MWC handshake we need to have a segmenter ready with output bitmap ready and commited. + fn prepare_segmenter(&self) -> Result { + self.adapter.prepare_segmenter() + } + + fn get_kernel_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error> { + self.adapter.get_kernel_segment(hash, id) + } + + fn get_bitmap_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error> { + self.adapter.get_bitmap_segment(hash, id) + } + + fn get_output_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error> { + self.adapter.get_output_segment(hash, id) + } + + fn get_rangeproof_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error> { + self.adapter.get_rangeproof_segment(hash, id) + } + + fn receive_bitmap_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result { + self.adapter + .receive_bitmap_segment(block_hash, bitmap_root_hash, segment) + } + + fn receive_output_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result { + self.adapter + .receive_output_segment(block_hash, bitmap_root_hash, segment) + } + + fn receive_rangeproof_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result { + self.adapter + .receive_rangeproof_segment(block_hash, bitmap_root_hash, segment) + } + + fn receive_kernel_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result { + self.adapter + .receive_kernel_segment(block_hash, bitmap_root_hash, segment) + } } impl NetAdapter for Peers { @@ -761,6 +721,7 @@ impl NetAdapter for Peers { /// A list of peers has been received from one of our peers. 
fn peer_addrs_received(&self, peer_addrs: Vec) { trace!("Received {} peer addrs, saving.", peer_addrs.len()); + let mut to_save: Vec = Vec::new(); for pa in peer_addrs { if let Ok(e) = self.exists_peer(pa.clone()) { if e { @@ -776,9 +737,10 @@ impl NetAdapter for Peers { ban_reason: ReasonForBan::None, last_connected: Utc::now().timestamp(), }; - if let Err(e) = self.save_peer(&peer) { - error!("Could not save received peer address: {:?}", e); - } + to_save.push(peer); + } + if let Err(e) = self.save_peers(to_save) { + error!("Could not save received peer addresses: {:?}", e); } } @@ -796,3 +758,86 @@ impl NetAdapter for Peers { } } } + +pub struct PeersIter { + iter: I, +} + +impl IntoIterator for PeersIter { + type Item = I::Item; + type IntoIter = I; + + fn into_iter(self) -> Self::IntoIter { + self.iter.into_iter() + } +} + +impl>> PeersIter { + /// Filter peers that are currently connected. + /// Note: This adaptor takes a read lock internally. + /// So if we are chaining adaptors then defer this toward the end of the chain. + pub fn connected(self) -> PeersIter>> { + PeersIter { + iter: self.iter.filter(|p| p.is_connected()), + } + } + + /// Filter inbound peers. + pub fn inbound(self) -> PeersIter>> { + PeersIter { + iter: self.iter.filter(|p| p.info.is_inbound()), + } + } + + /// Filter outbound peers. + pub fn outbound(self) -> PeersIter>> { + PeersIter { + iter: self.iter.filter(|p| p.info.is_outbound()), + } + } + + /// Filter peers with the provided difficulty comparison fn. + /// + /// with_difficulty(|x| x > diff) + /// + /// Note: This adaptor takes a read lock internally for each peer. + /// So if we are chaining adaptors then put this toward later in the chain. + pub fn with_difficulty(self, f: F) -> PeersIter>> + where + F: Fn(Difficulty) -> bool, + { + PeersIter { + iter: self.iter.filter(move |p| f(p.info.total_difficulty())), + } + } + + /// Filter peers that support the provided capabilities. + pub fn with_capabilities( + self, + cap: Capabilities, + ) -> PeersIter>> { + PeersIter { + iter: self.iter.filter(move |p| p.info.capabilities.contains(cap)), + } + } + + pub fn by_addr(&mut self, addr: PeerAddr) -> Option> { + self.iter.find(|p| p.info.addr == addr) + } + + /// Choose a random peer from the current (filtered) peers. + pub fn choose_random(self) -> Option> { + let mut rng = rand::thread_rng(); + self.iter.choose(&mut rng) + } + + /// Find the max difficulty of the current (filtered) peers. + pub fn max_difficulty(self) -> Option { + self.iter.map(|p| p.info.total_difficulty()).max() + } + + /// Count the current (filtered) peers. + pub fn count(self) -> usize { + self.iter.count() + } +} diff --git a/p2p/src/protocol.rs b/p2p/src/protocol.rs index e7bb4f57a3..9c33245138 100644 --- a/p2p/src/protocol.rs +++ b/p2p/src/protocol.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,35 +12,32 @@ // See the License for the specific language governing permissions and // limitations under the License. 
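`PeersIter` above is a thin wrapper whose adaptors consume `self` and return a new wrapper around a filtered iterator; that is what lets call sites chain `iter().outbound().connected().count()`. A self-contained miniature of the same pattern, with plain integers standing in for `Arc<Peer>`:

```rust
struct FilterIter<I> {
    iter: I,
}

impl<I: Iterator<Item = u32>> FilterIter<I> {
    // Each adaptor consumes the wrapper and returns a new one, like
    // PeersIter::connected() / inbound() / outbound().
    fn even(self) -> FilterIter<impl Iterator<Item = u32>> {
        FilterIter {
            iter: self.iter.filter(|x| x % 2 == 0),
        }
    }
    fn count(self) -> usize {
        self.iter.count()
    }
}

fn main() {
    let it = FilterIter {
        iter: vec![1, 2, 3, 4, 6].into_iter(),
    };
    assert_eq!(it.even().count(), 3);
}
```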
-use crate::chain; -use crate::conn::{Message, MessageHandler, Tracker}; -use crate::core::core::{self, hash::Hash, hash::Hashed, CompactBlock}; -use crate::serv::Server; -use crate::types::PeerAddr::Onion; +use crate::conn::MessageHandler; +use crate::grin_core::core::{hash::Hashed, CompactBlock}; +use crate::{chain, Capabilities}; use crate::msg::{ - BanReason, GetPeerAddrs, Headers, Locator, Msg, PeerAddrs, Ping, Pong, TorAddress, - TxHashSetArchive, TxHashSetRequest, Type, + ArchiveHeaderData, Consumed, Headers, Message, Msg, OutputBitmapSegmentResponse, + OutputSegmentResponse, PeerAddrs, PibdSyncState, Pong, SegmentRequest, SegmentResponse, + TxHashSetArchive, Type, }; - -use crate::types::Capabilities; -use crate::types::PeerAddr; -use crate::types::{Error, NetAdapter, PeerInfo}; +use crate::peer::PeerPibdStatus; +use crate::serv::Server; +use crate::types::{AttachmentMeta, Error, NetAdapter, PeerAddr, PeerAddr::Onion, PeerInfo}; use chrono::prelude::Utc; +use grin_core::core::hash::Hash; +use grin_util::Mutex; use rand::{thread_rng, Rng}; -use std::cmp; -use std::fs::{self, File, OpenOptions}; -use std::io::{BufWriter, Read}; +use std::fs::{self, File}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; -use std::time::Instant; pub struct Protocol { adapter: Arc, peer_info: PeerInfo, state_sync_requested: Arc, - header_cache_size: u64, server: Server, + pibd_status: Arc>, } impl Protocol { @@ -48,179 +45,173 @@ impl Protocol { adapter: Arc, peer_info: PeerInfo, state_sync_requested: Arc, - header_cache_size: u64, server: Server, + pibd_status: Arc>, ) -> Protocol { Protocol { adapter, peer_info, state_sync_requested, - header_cache_size, server, + pibd_status, } } + + fn report_pibd_response(&self, success: bool) { + if success { + let mut pibd_status = self.pibd_status.lock(); + pibd_status.no_response_requests = 0; + pibd_status.no_response_time = None; + } + } + + fn get_peer_output_bitmap_root(&self) -> Option { + let pibd_status = self.pibd_status.lock(); + pibd_status.output_bitmap_root.clone() + } } impl MessageHandler for Protocol { - fn consume( - &mut self, - mut msg: Message, - stopped: Arc, - tracker: Arc, - ) -> Result, Error> { + fn consume(&self, message: Message) -> Result { let adapter = &self.adapter; - let header_cache_size = self.header_cache_size; // If we received a msg from a banned peer then log and drop it. // If we are getting a lot of these then maybe we are not cleaning // banned peers up correctly? if adapter.is_banned(self.peer_info.addr.clone()) { debug!( - "handler: consume: peer {:?} banned, received: {:?}, dropping.", - self.peer_info.addr, msg.header.msg_type, + "handler: consume: peer {:?} banned, received: {}, dropping.", + self.peer_info.addr, message, ); - return Ok(None); + return Ok(Consumed::Disconnect); } - match msg.header.msg_type { - Type::Ping => { - let ping: Ping = msg.body()?; + let consumed = match message { + Message::Attachment(update, _) => { + self.adapter.txhashset_download_update( + update.meta.start_time, + (update.meta.size - update.left) as u64, + update.meta.size as u64, + ); + + if update.left == 0 { + let meta = update.meta; + trace!( + "handle_payload: txhashset archive save to file {:?} success", + meta.path, + ); + + let zip = File::open(meta.path.clone())?; + let res = + self.adapter + .txhashset_write(meta.hash.clone(), zip, &self.peer_info)?; + + debug!( + "handle_payload: txhashset archive for {} at {}, DONE. 
Data Ok: {}", + meta.hash, meta.height, !res + ); + + if let Err(e) = fs::remove_file(meta.path.clone()) { + warn!("fail to remove tmp file: {:?}. err: {}", meta.path, e); + } + } + + Consumed::None + } + + Message::Ping(ping) => { adapter.peer_difficulty( self.peer_info.addr.clone(), ping.total_difficulty, ping.height, ); - - Ok(Some(Msg::new( + Consumed::Response(Msg::new( Type::Pong, Pong { total_difficulty: adapter.total_difficulty()?, height: adapter.total_height()?, }, self.peer_info.version, - )?)) + )?) } - Type::Pong => { - let pong: Pong = msg.body()?; + Message::Pong(pong) => { adapter.peer_difficulty( self.peer_info.addr.clone(), pong.total_difficulty, pong.height, ); - Ok(None) + Consumed::None } - Type::BanReason => { - let ban_reason: BanReason = msg.body()?; + Message::BanReason(ban_reason) => { error!("handle_payload: BanReason {:?}", ban_reason); - Ok(None) + Consumed::Disconnect } - Type::TransactionKernel => { - let h: Hash = msg.body()?; - debug!( - "handle_payload: received tx kernel: {}, msg_len: {}", - h, msg.header.msg_len - ); + Message::TransactionKernel(h) => { + debug!("handle_payload: received tx kernel: {}", h); adapter.tx_kernel_received(h, &self.peer_info)?; - Ok(None) + Consumed::None } - Type::GetTransaction => { - let h: Hash = msg.body()?; - debug!( - "handle_payload: GetTransaction: {}, msg_len: {}", - h, msg.header.msg_len, - ); + Message::GetTransaction(h) => { + debug!("handle_payload: GetTransaction: {}", h); let tx = adapter.get_transaction(h); if let Some(tx) = tx { - Ok(Some(Msg::new( - Type::Transaction, - tx, - self.peer_info.version, - )?)) + Consumed::Response(Msg::new(Type::Transaction, tx, self.peer_info.version)?) } else { - Ok(None) + Consumed::None } } - Type::Transaction => { - debug!( - "handle_payload: received tx: msg_len: {}", - msg.header.msg_len - ); - let tx: core::Transaction = msg.body()?; + Message::Transaction(tx) => { + debug!("handle_payload: received tx"); adapter.transaction_received(tx, false)?; - Ok(None) + Consumed::None } - Type::StemTransaction => { - debug!( - "handle_payload: received stem tx: msg_len: {}", - msg.header.msg_len - ); - let tx: core::Transaction = msg.body()?; + Message::StemTransaction(tx) => { + debug!("handle_payload: received stem tx"); adapter.transaction_received(tx, true)?; - Ok(None) + Consumed::None } - Type::GetBlock => { - let h: Hash = msg.body()?; - trace!( - "handle_payload: GetBlock: {}, msg_len: {}", - h, - msg.header.msg_len, - ); - + Message::GetBlock(h) => { + trace!("handle_payload: GetBlock: {}", h); let bo = adapter.get_block(h, &self.peer_info); if let Some(b) = bo { - return Ok(Some(Msg::new(Type::Block, b, self.peer_info.version)?)); + Consumed::Response(Msg::new(Type::Block, b, self.peer_info.version)?) + } else { + Consumed::None } - Ok(None) } - Type::Block => { - debug!( - "handle_payload: received block: msg_len: {}", - msg.header.msg_len - ); - let b: core::UntrustedBlock = msg.body()?; - + Message::Block(b) => { + debug!("handle_payload: received block"); // We default to NONE opts here as we do not know know yet why this block was // received. // If we requested this block from a peer due to our node syncing then // the peer adapter will override opts to reflect this. 
adapter.block_received(b.into(), &self.peer_info, chain::Options::NONE)?; - Ok(None) + Consumed::None } - Type::GetCompactBlock => { - let h: Hash = msg.body()?; + Message::GetCompactBlock(h) => { if let Some(b) = adapter.get_block(h, &self.peer_info) { let cb: CompactBlock = b.into(); - Ok(Some(Msg::new( - Type::CompactBlock, - cb, - self.peer_info.version, - )?)) + Consumed::Response(Msg::new(Type::CompactBlock, cb, self.peer_info.version)?) } else { - Ok(None) + Consumed::None } } - Type::CompactBlock => { - debug!( - "handle_payload: received compact block: msg_len: {}", - msg.header.msg_len - ); - let b: core::UntrustedCompactBlock = msg.body()?; - + Message::CompactBlock(b) => { + debug!("handle_payload: received compact block"); adapter.compact_block_received(b.into(), &self.peer_info)?; - Ok(None) + Consumed::None } - Type::TorAddress => { - let tor_address: TorAddress = msg.body()?; + Message::TorAddress(tor_address) => { info!( "TorAddress received from {:?}, address = {:?}", self.peer_info, tor_address @@ -239,61 +230,34 @@ impl MessageHandler for Protocol { self.server.peers.save_peer(&peer)?; } } - Ok(None) + Consumed::None } - Type::GetHeaders => { + Message::GetHeaders(loc) => { // load headers from the locator - let loc: Locator = msg.body()?; let headers = adapter.locate_headers(&loc.hashes)?; // serialize and send all the headers over - Ok(Some(Msg::new( + Consumed::Response(Msg::new( Type::Headers, Headers { headers }, self.peer_info.version, - )?)) + )?) } // "header first" block propagation - if we have not yet seen this block // we can go request it from some of our peers - Type::Header => { - let header: core::UntrustedBlockHeader = msg.body()?; + Message::Header(header) => { adapter.header_received(header.into(), &self.peer_info)?; - Ok(None) + Consumed::None } - Type::Headers => { - let mut total_bytes_read = 0; - - // Read the count (u16) so we now how many headers to read. - let (count, bytes_read): (u16, _) = msg.streaming_read()?; - total_bytes_read += bytes_read; - - // Read chunks of headers off the stream and pass them off to the adapter. - let chunk_size = 32u16; - let mut headers = Vec::with_capacity(chunk_size as usize); - for i in 1..=count { - let (header, bytes_read) = - msg.streaming_read::()?; - headers.push(header.into()); - total_bytes_read += bytes_read; - if i % chunk_size == 0 || i == count { - adapter.headers_received(&headers, &self.peer_info, header_cache_size)?; - headers.clear(); - } - } - - // Now check we read the correct total number of bytes off the stream. - if total_bytes_read != msg.header.msg_len { - return Err(Error::MsgLen); - } - - Ok(None) + Message::Headers(data) => { + adapter.headers_received(&data.headers, &self.peer_info)?; + Consumed::None } - Type::GetPeerAddrs => { - let get_peers: GetPeerAddrs = msg.body()?; + Message::GetPeerAddrs(get_peers) => { let peers = adapter.find_peer_addrs(get_peers.capabilities & !Capabilities::TOR_ADDRESS); @@ -314,15 +278,14 @@ impl MessageHandler for Protocol { peers }; - Ok(Some(Msg::new( + Consumed::Response(Msg::new( Type::PeerAddrs, PeerAddrs { peers }, self.peer_info.version, - )?)) + )?) 
} - Type::PeerAddrs => { - let peer_addrs: PeerAddrs = msg.body()?; + Message::PeerAddrs(peer_addrs) => { let mut peers: Vec = Vec::new(); for peer in peer_addrs.peers { match peer.clone() { @@ -344,10 +307,10 @@ impl MessageHandler for Protocol { } } adapter.peer_addrs_received(peers); - Ok(None) + Consumed::None } - Type::TxHashSetRequest => { - let sm_req: TxHashSetRequest = msg.body()?; + + Message::TxHashSetRequest(sm_req) => { debug!( "handle_payload: txhashset req for {} at {}", sm_req.hash, sm_req.height @@ -369,14 +332,13 @@ impl MessageHandler for Protocol { self.peer_info.version, )?; resp.add_attachment(txhashset.reader); - Ok(Some(resp)) + Consumed::Response(resp) } else { - Ok(None) + Consumed::None } } - Type::TxHashSetArchive => { - let sm_arch: TxHashSetArchive = msg.body()?; + Message::TxHashSetArchive(sm_arch) => { info!( "handle_payload: txhashset archive for {} at {}. size={}", sm_arch.hash, sm_arch.height, sm_arch.bytes, @@ -394,95 +356,291 @@ impl MessageHandler for Protocol { // Update the sync state requested status self.state_sync_requested.store(false, Ordering::Relaxed); - let download_start_time = Utc::now(); + let start_time = Utc::now(); self.adapter - .txhashset_download_update(download_start_time, 0, sm_arch.bytes); + .txhashset_download_update(start_time, 0, sm_arch.bytes); let nonce: u32 = thread_rng().gen_range(0, 1_000_000); - let tmp = self.adapter.get_tmpfile_pathname(format!( + let path = self.adapter.get_tmpfile_pathname(format!( "txhashset-{}-{}.zip", - download_start_time.timestamp(), + start_time.timestamp(), nonce )); - let mut now = Instant::now(); - let mut save_txhashset_to_file = |file| -> Result<(), Error> { - let mut tmp_zip = - BufWriter::new(OpenOptions::new().write(true).create_new(true).open(file)?); - let total_size = sm_arch.bytes as usize; - let mut downloaded_size: usize = 0; - let mut request_size = cmp::min(48_000, total_size); - while request_size > 0 { - let size = msg.copy_attachment(request_size, &mut tmp_zip)?; - downloaded_size += size; - request_size = cmp::min(48_000, total_size - downloaded_size); - self.adapter.txhashset_download_update( - download_start_time, - downloaded_size as u64, - total_size as u64, - ); - if now.elapsed().as_secs() > 10 { - now = Instant::now(); - debug!( - "handle_payload: txhashset archive: {}/{}", - downloaded_size, total_size - ); - } - // Increase received bytes quietly (without affecting the counters). - // Otherwise we risk banning a peer as "abusive". - tracker.inc_quiet_received(size as u64); - - // check the close channel - if stopped.load(Ordering::Relaxed) { - debug!("stopping txhashset download early"); - return Err(Error::ConnectionClose); - } - } - debug!( - "handle_payload: txhashset archive: {}/{} ... DONE", - downloaded_size, total_size - ); - tmp_zip - .into_inner() - .map_err(|e| { - Error::Internal(format!("Unable to save txhashset data, {}", e)) - })? - .sync_all()?; - Ok(()) + + let file = fs::OpenOptions::new() + .write(true) + .create_new(true) + .open(path.clone())?; + + let meta = AttachmentMeta { + size: sm_arch.bytes as usize, + hash: sm_arch.hash, + height: sm_arch.height, + start_time, + path, }; - if let Err(e) = save_txhashset_to_file(tmp.clone()) { - error!( - "handle_payload: txhashset archive save to file fail. 
err={:?}", - e + Consumed::Attachment(Arc::new(meta), file) + } + Message::StartPibdSyncRequest(sm_req) => { + debug!( + "handle_payload: start PIBD request for {} at {}", + sm_req.hash, sm_req.height + ); + match self.adapter.prepare_segmenter() { + Ok(segmenter) => { + let header = segmenter.header(); + let header_hash = header.hash(); + if header_hash == sm_req.hash && header.height == sm_req.height { + if let Ok(bitmap_root_hash) = segmenter.bitmap_root() { + // we can start the sync process, let's prepare the segmenter + Consumed::Response(Msg::new( + Type::PibdSyncState, + &PibdSyncState { + header_height: header.height, + header_hash: header_hash, + output_bitmap_root: bitmap_root_hash, + }, + self.peer_info.version, + )?) + } else { + Consumed::None + } + } else { + Consumed::Response(Msg::new( + Type::HasAnotherArchiveHeader, + &ArchiveHeaderData { + height: header.height, + hash: header_hash, + }, + self.peer_info.version, + )?) + } + } + Err(e) => { + warn!( + "Unable to prepare segment for PIBD request for {} at {}. Error: {}", + sm_req.hash, sm_req.height, e + ); + Consumed::None + } + } + } + Message::GetOutputBitmapSegment(req) => { + let SegmentRequest { + block_hash, + identifier, + } = req; + + match self.adapter.get_bitmap_segment(block_hash, identifier) { + Ok(segment) => Consumed::Response(Msg::new( + Type::OutputBitmapSegment, + OutputBitmapSegmentResponse { + block_hash, + segment: segment.into(), + }, + self.peer_info.version, + )?), + Err(chain::Error::SegmenterHeaderMismatch(hash, height)) => { + Consumed::Response(Msg::new( + Type::HasAnotherArchiveHeader, + &ArchiveHeaderData { + height: height, + hash: hash, + }, + self.peer_info.version, + )?) + } + Err(e) => { + warn!("Failed to process GetOutputBitmapSegment for block_hash={} and identifier={:?}. Error: {}", block_hash, identifier, e); + Consumed::None + } + } + } + Message::GetOutputSegment(req) => { + let SegmentRequest { + block_hash, + identifier, + } = req; + + match self.adapter.get_output_segment(block_hash, identifier) { + Ok(segment) => Consumed::Response(Msg::new( + Type::OutputSegment, + OutputSegmentResponse { + response: SegmentResponse { + block_hash, + segment, + }, + }, + self.peer_info.version, + )?), + Err(chain::Error::SegmenterHeaderMismatch(hash, height)) => { + Consumed::Response(Msg::new( + Type::HasAnotherArchiveHeader, + &ArchiveHeaderData { + height: height, + hash: hash, + }, + self.peer_info.version, + )?) + } + Err(e) => { + warn!("Failed to process GetOutputSegment for block_hash={} and identifier={:?}. Error: {}", block_hash, identifier, e); + Consumed::None + } + } + } + Message::GetRangeProofSegment(req) => { + let SegmentRequest { + block_hash, + identifier, + } = req; + match self.adapter.get_rangeproof_segment(block_hash, identifier) { + Ok(segment) => Consumed::Response(Msg::new( + Type::RangeProofSegment, + SegmentResponse { + block_hash, + segment, + }, + self.peer_info.version, + )?), + Err(chain::Error::SegmenterHeaderMismatch(hash, height)) => { + Consumed::Response(Msg::new( + Type::HasAnotherArchiveHeader, + &ArchiveHeaderData { + height: height, + hash: hash, + }, + self.peer_info.version, + )?) + } + Err(e) => { + warn!("Failed to process GetRangeProofSegment for block_hash={} and identifier={:?}. 
Error: {}", block_hash, identifier, e); + Consumed::None + } + } + } + Message::GetKernelSegment(req) => { + let SegmentRequest { + block_hash, + identifier, + } = req; + + match self.adapter.get_kernel_segment(block_hash, identifier) { + Ok(segment) => Consumed::Response(Msg::new( + Type::KernelSegment, + SegmentResponse { + block_hash, + segment, + }, + self.peer_info.version, + )?), + Err(chain::Error::SegmenterHeaderMismatch(hash, height)) => { + Consumed::Response(Msg::new( + Type::HasAnotherArchiveHeader, + &ArchiveHeaderData { + height: height, + hash: hash, + }, + self.peer_info.version, + )?) + } + Err(e) => { + warn!("Failed to process GetKernelSegment for block_hash={} and identifier={:?}. Error: {}", block_hash, identifier, e); + Consumed::None + } + } + } + Message::PibdSyncState(req) => { + self.report_pibd_response(true); + debug!("Received PibdSyncState from peer {:?}. Header height={}, output_bitmap_root={}", self.peer_info.addr, req.header_height, req.output_bitmap_root); + { + let mut status = self.pibd_status.lock(); + status.update_pibd_status( + req.header_hash, + req.header_height, + Some(req.output_bitmap_root), ); - return Err(e); } - - trace!( - "handle_payload: txhashset archive save to file {:?} success", - tmp, - ); - - let tmp_zip = File::open(tmp.clone())?; - let res = self - .adapter - .txhashset_write(sm_arch.hash, tmp_zip, &self.peer_info)?; - - info!( - "handle_payload: txhashset archive for {} at {}, DONE. Data Ok: {}", - sm_arch.hash, sm_arch.height, res + Consumed::None + } + Message::HasAnotherArchiveHeader(req) => { + debug!( + "Received HasAnotherArchiveHeader from peer {:?}. Has header at height {}", + self.peer_info.addr, req.height ); - - if let Err(e) = fs::remove_file(tmp.clone()) { - warn!("fail to remove tmp file: {:?}. 
err: {}", tmp, e); + let mut status = self.pibd_status.lock(); + status.update_pibd_status(req.hash, req.height, None); + Consumed::None + } + Message::OutputBitmapSegment(req) => { + let OutputBitmapSegmentResponse { + block_hash, + segment, + } = req; + debug!("Received Output Bitmap Segment: bh: {}", block_hash); + + if let Some(output_bitmap_root) = self.get_peer_output_bitmap_root() { + adapter + .receive_bitmap_segment(block_hash, output_bitmap_root, segment.into()) + .and_then(|ok| { + self.report_pibd_response(ok); + Ok(ok) + })?; } - - Ok(None) + Consumed::None } - Type::Error | Type::Hand | Type::Shake => { - debug!("Received an unexpected msg: {:?}", msg.header.msg_type); - Ok(None) + Message::OutputSegment(req) => { + let OutputSegmentResponse { response } = req; + debug!("Received Output Segment: bh, {}", response.block_hash,); + if let Some(output_bitmap_root) = self.get_peer_output_bitmap_root() { + adapter + .receive_output_segment( + response.block_hash, + output_bitmap_root, + response.segment.into(), + ) + .and_then(|ok| { + self.report_pibd_response(ok); + Ok(ok) + })?; + } + Consumed::None } - } + Message::RangeProofSegment(req) => { + let SegmentResponse { + block_hash, + segment, + } = req; + debug!("Received Rangeproof Segment: bh: {}", block_hash); + if let Some(output_bitmap_root) = self.get_peer_output_bitmap_root() { + adapter + .receive_rangeproof_segment(block_hash, output_bitmap_root, segment.into()) + .and_then(|ok| { + self.report_pibd_response(ok); + Ok(ok) + })?; + } + Consumed::None + } + Message::KernelSegment(req) => { + let SegmentResponse { + block_hash, + segment, + } = req; + debug!("Received Kernel Segment: bh: {}", block_hash); + if let Some(output_bitmap_root) = self.get_peer_output_bitmap_root() { + adapter + .receive_kernel_segment(block_hash, output_bitmap_root, segment.into()) + .and_then(|ok| { + self.report_pibd_response(ok); + Ok(ok) + })?; + } + Consumed::None + } + Message::Unknown(_) => Consumed::None, + }; + Ok(consumed) } } diff --git a/p2p/src/serv.rs b/p2p/src/serv.rs index 8212c76d68..2022be8246 100644 --- a/p2p/src/serv.rs +++ b/p2p/src/serv.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,10 +22,12 @@ use std::thread; use std::time::Duration; use crate::chain; -use crate::core::core; -use crate::core::core::hash::Hash; -use crate::core::global; -use crate::core::pow::Difficulty; +use crate::chain::txhashset::BitmapChunk; +use crate::grin_core::core; +use crate::grin_core::core::hash::Hash; +use crate::grin_core::core::{OutputIdentifier, Segment, SegmentIdentifier, TxKernel}; +use crate::grin_core::global; +use crate::grin_core::pow::Difficulty; use crate::handshake::Handshake; use crate::peer::Peer; use crate::peers::Peers; @@ -34,8 +36,10 @@ use crate::types::{ Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerAddr, PeerInfo, ReasonForBan, TxHashSetRead, }; +use crate::util::secp::pedersen::RangeProof; use crate::util::StopState; use chrono::prelude::{DateTime, Utc}; +use grin_chain::txhashset::Segmenter; /// P2P server implementation, handling bootstrapping to find and connect to /// peers, receiving connections from other peers and keep track of all of them. 
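A condensed model of the PIBD startup exchange implemented by the `consume()` arms above: `StartPibdSyncRequest` is answered with `PibdSyncState` when the responder's segmenter sits on the requested archive header, and with `HasAnotherArchiveHeader` otherwise. The real code compares both hash and height and may also fail while computing the bitmap root; the types here are simplified stand-ins:

```rust
#[derive(Debug, PartialEq)]
enum Reply {
    PibdSyncState { header_height: u64, output_bitmap_root: u64 },
    HasAnotherArchiveHeader { height: u64 },
}

fn answer_start_pibd(req_height: u64, our_height: u64, bitmap_root: u64) -> Reply {
    if req_height == our_height {
        Reply::PibdSyncState {
            header_height: our_height,
            output_bitmap_root: bitmap_root,
        }
    } else {
        Reply::HasAnotherArchiveHeader { height: our_height }
    }
}

fn main() {
    assert_eq!(
        answer_start_pibd(100, 100, 7),
        Reply::PibdSyncState { header_height: 100, output_bitmap_root: 7 }
    );
    assert_eq!(
        answer_start_pibd(90, 100, 7),
        Reply::HasAnotherArchiveHeader { height: 100 }
    );
}
```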
@@ -55,7 +59,7 @@ impl Server { /// Creates a new idle p2p server with no peers pub fn new( db_root: &str, - capab: Capabilities, + capabilities: Capabilities, config: P2PConfig, adapter: Arc, genesis: Hash, @@ -65,7 +69,7 @@ impl Server { ) -> Result { Ok(Server { config: config.clone(), - capabilities: capab, + capabilities, handshake: Arc::new(Handshake::new( genesis, config.clone(), @@ -85,7 +89,7 @@ impl Server { /// Starts a new TCP server and listen to incoming connections. This is a /// blocking call until the TCP server stops. - pub fn listen(&self, header_cache_size: u64) -> Result<(), Error> { + pub fn listen(&self) -> Result<(), Error> { // start TCP listener and handle incoming connections let addr = SocketAddr::new(self.config.host, self.config.port); let listener = TcpListener::bind(addr)?; @@ -136,8 +140,10 @@ impl Server { } continue; } - match self.handle_new_peer(stream, header_cache_size) { - Err(Error::ConnectionClose) => debug!("shutting down, ignoring a new peer"), + match self.handle_new_peer(stream) { + Err(Error::ConnectionClose(err)) => { + debug!("shutting down, ignoring a new peer, {}", err) + } Err(e) => { debug!("Error accepting peer {}: {:?}", peer_addr.to_string(), e); let _ = self.peers.add_banned(peer_addr, ReasonForBan::BadHandshake); @@ -162,14 +168,16 @@ impl Server { /// Asks the server to connect to a new peer. Directly returns the peer if /// we're already connected to the provided address. - pub fn connect(&self, addr: PeerAddr, header_cache_size: u64) -> Result, Error> { + pub fn connect(&self, addr: PeerAddr) -> Result, Error> { if self.stop_state.is_stopped() { - return Err(Error::ConnectionClose); + return Err(Error::ConnectionClose(String::from("node is stopping"))); } if Peer::is_denied(&self.config, addr.clone()) { debug!("connect_peer: peer {:?} denied, not connecting.", addr); - return Err(Error::ConnectionClose); + return Err(Error::ConnectionClose(String::from( + "Peer is denied because it is in config black list", + ))); } if global::is_production_mode() { @@ -183,9 +191,9 @@ impl Server { // check if the onion address is self if global::is_production_mode() && self.self_onion_address.is_some() { - match addr.clone() { + match &addr { Onion(address) => { - if self.self_onion_address.as_ref().unwrap() == &address { + if self.self_onion_address.as_ref().unwrap() == address { debug!("error trying to connect with self: {}", address); return Err(Error::PeerWithSelf); } @@ -255,7 +263,10 @@ impl Server { } } else { // can't connect to this because we don't have a socks proxy. - return Err(Error::ConnectionClose); + return Err(Error::ConnectionClose(format!( + "Failed connect to Tor address {} because Tor socks is not configured", + onion_address + ))); } } }; @@ -271,7 +282,6 @@ impl Server { self_addr, &self.handshake, self.peers.clone(), - header_cache_size, peer_addr, (*self).clone(), )?; @@ -292,9 +302,9 @@ impl Server { } } - fn handle_new_peer(&self, stream: TcpStream, header_cache_size: u64) -> Result<(), Error> { + fn handle_new_peer(&self, stream: TcpStream) -> Result<(), Error> { if self.stop_state.is_stopped() { - return Err(Error::ConnectionClose); + return Err(Error::ConnectionClose(String::from("Server is stopping"))); } let total_diff = self.peers.total_difficulty()?; @@ -305,7 +315,6 @@ impl Server { total_diff, &self.handshake, self.peers.clone(), - header_cache_size, self.clone(), )?; self.peers.add_connected(Arc::new(peer))?; @@ -326,7 +335,7 @@ impl Server { /// different sets of peers themselves. 
In addition, it prevent potential /// duplicate connections, malicious or not. fn check_undesirable(&self, stream: &TcpStream) -> bool { - if self.peers.peer_inbound_count() + if self.peers.iter().inbound().connected().count() as u32 >= self.config.peer_max_inbound_count() + self.config.peer_listener_buffer_count() { debug!("Accepting new connection will exceed peer limit, refusing connection."); @@ -422,7 +431,6 @@ impl ChainAdapter for DummyAdapter { &self, _: &[core::BlockHeader], _: &PeerInfo, - _: u64, ) -> Result { Ok(true) } @@ -440,14 +448,6 @@ impl ChainAdapter for DummyAdapter { unimplemented!() } - fn process_add_headers_sync( - &self, - _: &[core::BlockHeader], - _: u64, - ) -> Result { - unimplemented!() - } - fn txhashset_receive_ready(&self) -> bool { false } @@ -477,6 +477,78 @@ impl ChainAdapter for DummyAdapter { fn get_tmpfile_pathname(&self, _tmpfile_name: String) -> PathBuf { unimplemented!() } + + fn prepare_segmenter(&self) -> Result { + unimplemented!() + } + + fn get_kernel_segment( + &self, + _hash: Hash, + _id: SegmentIdentifier, + ) -> Result, chain::Error> { + unimplemented!() + } + + fn get_bitmap_segment( + &self, + _hash: Hash, + _id: SegmentIdentifier, + ) -> Result, chain::Error> { + unimplemented!() + } + + fn get_output_segment( + &self, + _hash: Hash, + _id: SegmentIdentifier, + ) -> Result, chain::Error> { + unimplemented!() + } + + fn get_rangeproof_segment( + &self, + _hash: Hash, + _id: SegmentIdentifier, + ) -> Result, chain::Error> { + unimplemented!() + } + + fn receive_bitmap_segment( + &self, + _block_hash: Hash, + _bitmap_root_hash: Hash, + _segment: Segment, + ) -> Result { + unimplemented!() + } + + fn receive_output_segment( + &self, + _block_hash: Hash, + _bitmap_root_hash: Hash, + _segment: Segment, + ) -> Result { + unimplemented!() + } + + fn receive_rangeproof_segment( + &self, + _block_hash: Hash, + _bitmap_root_hash: Hash, + _segment: Segment, + ) -> Result { + unimplemented!() + } + + fn receive_kernel_segment( + &self, + _block_hash: Hash, + _bitmap_root_hash: Hash, + _segment: Segment, + ) -> Result { + unimplemented!() + } } impl NetAdapter for DummyAdapter { diff --git a/p2p/src/store.rs b/p2p/src/store.rs index 64baacbf85..fe3495cf5e 100644 --- a/p2p/src/store.rs +++ b/p2p/src/store.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
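The inbound cap checked in `check_undesirable()` above reduces to a single comparison: refuse a new inbound connection once the current inbound count reaches the configured maximum plus the listener buffer. A minimal sketch with descriptive parameter names standing in for the `P2PConfig` getters:

```rust
/// Mirrors the refusal condition in check_undesirable().
fn refuse_inbound(current_inbound: u32, max_inbound: u32, listener_buffer: u32) -> bool {
    current_inbound >= max_inbound + listener_buffer
}

fn main() {
    assert!(!refuse_inbound(15, 16, 8)); // still room
    assert!(refuse_inbound(24, 16, 8)); // cap + buffer reached
}
```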
@@ -19,7 +19,7 @@ use num::FromPrimitive; use rand::seq::SliceRandom; use rand::thread_rng; -use crate::core::ser::{self, Readable, Reader, Writeable, Writer}; +use crate::grin_core::ser::{self, DeserializationMode, Readable, Reader, Writeable, Writer}; use crate::types::{Capabilities, PeerAddr, ReasonForBan}; use grin_store::{self, option_to_not_found, to_key, Error}; @@ -139,10 +139,20 @@ impl PeerStore { batch.commit() } + pub fn save_peers(&self, p: Vec) -> Result<(), Error> { + let batch = self.db.batch()?; + for pd in p { + debug!("save_peers: {:?} marked {:?}", pd.addr, pd.flags); + batch.put_ser(&peer_key(pd.addr.clone())[..], &pd)?; + } + batch.commit() + } + pub fn get_peer(&self, peer_addr: PeerAddr) -> Result { - option_to_not_found(self.db.get_ser(&peer_key(peer_addr.clone())[..]), || { - format!("Peer at address: {}", peer_addr) - }) + option_to_not_found( + self.db.get_ser(&peer_key(peer_addr.clone())[..], None), + || format!("Peer at address: {}", peer_addr), + ) } pub fn exists_peer(&self, peer_addr: PeerAddr) -> Result { @@ -157,6 +167,7 @@ impl PeerStore { batch.commit() } + /// Find some peers in our local db. pub fn find_peers( &self, state: State, @@ -164,24 +175,28 @@ impl PeerStore { count: usize, ) -> Result, Error> { let mut peers = self - .db - .iter::(&to_key(PEER_PREFIX, ""))? - .map(|(_, v)| v) + .peers_iter()? .filter(|p| p.flags == state && p.capabilities.contains(cap)) .collect::>(); peers[..].shuffle(&mut thread_rng()); Ok(peers.iter().take(count).cloned().collect()) } + /// Iterator over all known peers. + pub fn peers_iter(&self) -> Result, Error> { + let key = to_key(PEER_PREFIX, ""); + let protocol_version = self.db.protocol_version(); + self.db.iter(&key, move |_, mut v| { + ser::deserialize(&mut v, protocol_version, DeserializationMode::default()) + .map_err(From::from) + }) + } + /// List all known peers /// Used for /v1/peers/all api endpoint pub fn all_peers(&self) -> Result, Error> { - let key = to_key(PEER_PREFIX, ""); - Ok(self - .db - .iter::(&key)? - .map(|(_, v)| v) - .collect::>()) + let peers: Vec = self.peers_iter()?.collect(); + Ok(peers) } /// Convenience method to load a peer data, update its status and save it @@ -190,7 +205,7 @@ impl PeerStore { let batch = self.db.batch()?; let mut peer = option_to_not_found( - batch.get_ser::(&peer_key(peer_addr.clone())[..]), + batch.get_ser::(&peer_key(peer_addr.clone())[..], None), || format!("Peer at address: {}", peer_addr), )?; peer.flags = new_state; @@ -209,7 +224,7 @@ impl PeerStore { { let mut to_remove = vec![]; - for x in self.all_peers()? { + for x in self.peers_iter()? { if predicate(&x) { to_remove.push(x) } diff --git a/p2p/src/types.rs b/p2p/src/types.rs index 2fa3908da6..425b44fd3b 100644 --- a/p2p/src/types.rs +++ b/p2p/src/types.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
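peers_iter() above hands the store a deserializing closure pinned to the store's protocol version, and find_peers()/all_peers() are rebuilt on top of it. A toy version of that pattern; the key/value store and codec below are stand-ins, not grin_store's actual API:

    struct Store {
        rows: Vec<(Vec<u8>, Vec<u8>)>,
        protocol_version: u32,
    }

    impl Store {
        // Iterate values under a key prefix, decoding each one with `decode`.
        // The store stays type-agnostic; the caller owns the codec.
        fn iter<'a, T: 'a>(
            &'a self,
            prefix: &'a [u8],
            decode: impl Fn(&[u8], u32) -> Option<T> + 'a,
        ) -> impl Iterator<Item = T> + 'a {
            let version = self.protocol_version;
            self.rows
                .iter()
                .filter(move |(k, _)| k.starts_with(prefix))
                .filter_map(move |(_, v)| decode(v, version))
        }
    }

    fn main() {
        let store = Store {
            rows: vec![(b"P:a".to_vec(), vec![1]), (b"X:b".to_vec(), vec![2])],
            protocol_version: 2,
        };
        // Decode each value as a single byte; the real code runs ser::deserialize
        // with DeserializationMode::default() here.
        let peers: Vec<u8> = store.iter(b"P:", |v, _ver| v.first().copied()).collect();
        assert_eq!(peers, vec![1]);
    }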
@@ -14,7 +14,6 @@ use crate::types::PeerAddr::Ip; use crate::types::PeerAddr::Onion; -use failure::Fail; use std::convert::From; use std::fmt; use std::fs::File; @@ -30,16 +29,18 @@ use serde::de::{SeqAccess, Visitor}; use serde::{Deserialize, Deserializer}; use std::sync::atomic::AtomicUsize; -use grin_store; - use crate::chain; -use crate::core::core; -use crate::core::core::hash::Hash; -use crate::core::global; -use crate::core::pow::Difficulty; -use crate::core::ser::{self, ProtocolVersion, Readable, Reader, Writeable, Writer}; +use crate::chain::txhashset::BitmapChunk; +use crate::grin_core::core; +use crate::grin_core::core::hash::Hash; +use crate::grin_core::core::{OutputIdentifier, Segment, SegmentIdentifier, TxKernel}; +use crate::grin_core::global; +use crate::grin_core::pow::Difficulty; +use crate::grin_core::ser::{self, ProtocolVersion, Readable, Reader, Writeable, Writer}; use crate::msg::PeerAddrs; +use crate::util::secp::pedersen::RangeProof; use crate::util::RwLock; +use grin_chain::txhashset::Segmenter; use std::time::Instant; /// Maximum number of block headers a peer should ever send @@ -71,44 +72,46 @@ const PEER_MIN_PREFERRED_OUTBOUND_COUNT: u32 = 8; /// than allowed by PEER_MAX_INBOUND_COUNT to encourage network bootstrapping. const PEER_LISTENER_BUFFER_COUNT: u32 = 8; -#[derive(Debug, Fail)] +#[derive(Debug, thiserror::Error)] pub enum Error { - #[fail(display = "p2p Serialization error, {}", _0)] + #[error("p2p Serialization error, {0}")] Serialization(ser::Error), - #[fail(display = "p2p Connection error, {}", _0)] + #[error("p2p Connection error, {0}")] Connection(io::Error), /// Header type does not match the expected message type - #[fail(display = "p2p bad message")] + #[error("p2p bad message")] BadMessage, - #[fail(display = "p2p message Length error")] + #[error("p2p unexpected message {0}")] + UnexpectedMessage(String), + #[error("p2p message Length error")] MsgLen, - #[fail(display = "p2p banned")] + #[error("p2p banned")] Banned, - #[fail(display = "p2p closed connection")] - ConnectionClose, - #[fail(display = "p2p timeout")] + #[error("p2p closed connection, {0}")] + ConnectionClose(String), + #[error("p2p timeout")] Timeout, - #[fail(display = "p2p store error, {}", _0)] + #[error("p2p store error, {0}")] Store(grin_store::Error), - #[fail(display = "p2p chain error, {}", _0)] + #[error("p2p chain error, {0}")] Chain(chain::Error), - #[fail(display = "peer with self")] + #[error("peer with self")] PeerWithSelf, - #[fail(display = "p2p no dandelion relay")] + #[error("p2p no dandelion relay")] NoDandelionRelay, - #[fail(display = "p2p genesis mismatch: {} vs peer {}", us, peer)] + #[error("p2p genesis mismatch: {us} vs peer {peer}")] GenesisMismatch { us: Hash, peer: Hash }, - #[fail(display = "p2p send error, {}", _0)] + #[error("p2p send error, {0}")] Send(String), - #[fail(display = "peer not found")] + #[error("peer not found")] PeerNotFound, - #[fail(display = "peer not banned")] + #[error("peer not banned")] PeerNotBanned, - #[fail(display = "peer exception, {}", _0)] + #[error("peer exception, {0}")] PeerException(String), - #[fail(display = "p2p internal error: {}", _0)] + #[error("p2p internal error: {0}")] Internal(String), - #[fail(display = "libp2p error: {}", _0)] + #[error("libp2p error: {0}")] Libp2pError(String), } @@ -360,10 +363,6 @@ pub struct P2PConfig { /// The list of seed nodes, if using Seeding as a seed type pub seeds: Option, - /// Capabilities expose by this node, also conditions which other peers this - /// node will have an 
affinity toward when connection. - pub capabilities: Capabilities, - pub peers_allow: Option, pub peers_deny: Option, @@ -391,7 +390,6 @@ impl Default for P2PConfig { P2PConfig { host: ipaddr, port: 3414, - capabilities: Capabilities::FULL_NODE, seeding_type: Seeding::default(), seeds: None, peers_allow: None, @@ -479,8 +477,7 @@ bitflags! { /// Can provide full history of headers back to genesis /// (for at least one arbitrary fork). const HEADER_HIST = 0b0000_0001; - /// Can provide block headers and the TxHashSet for some recent-enough - /// height. + /// Can provide recent txhashset archive for fast sync. const TXHASHSET_HIST = 0b0000_0010; /// Can provide a list of healthy peers const PEER_LIST = 0b0000_0100; @@ -488,16 +485,22 @@ bitflags! { const TX_KERNEL_HASH = 0b0000_1000; /// Can send/receive tor addresses const TOR_ADDRESS = 0b0001_0000; + /// Can provide PIBD segments during initial byte download (fast sync). + const PIBD_HIST = 0b0010_0000; + /// Can provide historical blocks for archival sync. + const BLOCK_HIST = 0b0100_0000; + } +} - /// All nodes right now are "full nodes". - /// Some nodes internally may maintain longer block histories (archival_mode) - /// but we do not advertise this to other nodes. - /// All nodes by default will accept lightweight "kernel first" tx broadcast. - const FULL_NODE = Capabilities::HEADER_HIST.bits - | Capabilities::TXHASHSET_HIST.bits - | Capabilities::PEER_LIST.bits - | Capabilities::TX_KERNEL_HASH.bits - | Capabilities::TOR_ADDRESS.bits; +/// Default capabilities. +impl Default for Capabilities { + fn default() -> Self { + Capabilities::HEADER_HIST + | Capabilities::TXHASHSET_HIST + | Capabilities::PEER_LIST + | Capabilities::TX_KERNEL_HASH + | Capabilities::TOR_ADDRESS + | Capabilities::PIBD_HIST } } @@ -524,6 +527,8 @@ enum_from_primitive! { ManualBan = 5, FraudHeight = 6, BadHandshake = 7, + PibdFailure = 8, + PibdInactive = 9, } } @@ -699,12 +704,6 @@ pub trait ChainAdapter: Sync + Send { peer_info: &PeerInfo, ) -> Result; - fn process_add_headers_sync( - &self, - bh: &[core::BlockHeader], - header_cache_size: u64, - ) -> Result; - /// A set of block header has been received, typically in response to a /// block /// header request. @@ -712,7 +711,6 @@ &self, bh: &[core::BlockHeader], peer_info: &PeerInfo, - header_sync_cache_size: u64, ) -> Result; /// Finds a list of block headers based on the provided locator. Tries to @@ -763,6 +761,61 @@ pub trait ChainAdapter: Sync + Send { /// Get a tmp file path in above specific tmp dir (create tmp dir if not exist) /// Delete file if tmp file already exists fn get_tmpfile_pathname(&self, tmpfile_name: String) -> PathBuf; + + /// For MWC handshake we need to have a segmenter ready with the output bitmap committed.
+ fn prepare_segmenter(&self) -> Result; + + fn get_kernel_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error>; + + fn get_bitmap_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error>; + + fn get_output_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error>; + + fn get_rangeproof_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error>; + + fn receive_bitmap_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result; + + fn receive_output_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result; + + fn receive_rangeproof_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result; + + fn receive_kernel_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result; } /// Additional methods required by the protocol that don't need to be @@ -781,3 +834,19 @@ pub trait NetAdapter: ChainAdapter { /// Is this peer currently banned? fn is_banned(&self, addr: PeerAddr) -> bool; } + +#[derive(Clone, Debug)] +pub struct AttachmentMeta { + pub size: usize, + pub hash: Hash, + pub height: u64, + pub start_time: DateTime, + pub path: PathBuf, +} + +#[derive(Clone, Debug)] +pub struct AttachmentUpdate { + pub read: usize, + pub left: usize, + pub meta: Arc, +} diff --git a/p2p/tests/capabilities.rs b/p2p/tests/capabilities.rs new file mode 100644 index 0000000000..398af4c2b5 --- /dev/null +++ b/p2p/tests/capabilities.rs @@ -0,0 +1,56 @@ +// Copyright 2021 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use grin_p2p::Capabilities; + +// We use `contains()` to filter capabilities bits. +#[test] +fn capabilities_contains() { + let x = Capabilities::HEADER_HIST; + + // capabilities contain themselves + assert!(x.contains(Capabilities::HEADER_HIST)); + + // UNKNOWN can be used to filter for any capabilities + assert!(x.contains(Capabilities::UNKNOWN)); + + // capabilities do not contain other disjoint capabilities + assert_eq!(false, x.contains(Capabilities::PEER_LIST)); +} + +#[test] +fn default_capabilities() { + let x = Capabilities::default(); + + // Check that default capabilities is covered by UNKNOWN. + assert!(x.contains(Capabilities::UNKNOWN)); + + // Check that all the expected capabilities are included in default capabilities. 
+ assert!(x.contains(Capabilities::HEADER_HIST)); + assert!(x.contains(Capabilities::TXHASHSET_HIST)); + assert!(x.contains(Capabilities::PEER_LIST)); + assert!(x.contains(Capabilities::TX_KERNEL_HASH)); + assert!(x.contains(Capabilities::TOR_ADDRESS)); + assert!(x.contains(Capabilities::PIBD_HIST)); + + assert_eq!( + x, + Capabilities::HEADER_HIST + | Capabilities::TXHASHSET_HIST + | Capabilities::PEER_LIST + | Capabilities::TX_KERNEL_HASH + | Capabilities::TOR_ADDRESS + | Capabilities::PIBD_HIST + ); +} diff --git a/p2p/tests/peer_addr.rs b/p2p/tests/peer_addr.rs index d46f3ddbe2..ed9857c463 100644 --- a/p2p/tests/peer_addr.rs +++ b/p2p/tests/peer_addr.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/p2p/tests/peer_handshake.rs b/p2p/tests/peer_handshake.rs index 6931115986..3c81bc9201 100644 --- a/p2p/tests/peer_handshake.rs +++ b/p2p/tests/peer_handshake.rs @@ -58,7 +58,7 @@ fn peer_handshake() { }; let net_adapter = Arc::new(p2p::DummyAdapter {}); let server_inner = p2p::Server::new( - ".grin", + ".mwc", p2p::Capabilities::UNKNOWN, p2p_config.clone(), net_adapter.clone(), @@ -71,7 +71,7 @@ fn peer_handshake() { let server = Arc::new(server_inner.clone()); let p2p_inner = server.clone(); - let _ = thread::spawn(move || p2p_inner.listen(100_000)); + let _ = thread::spawn(move || p2p_inner.listen()); thread::sleep(time::Duration::from_secs(1)); @@ -86,7 +86,6 @@ fn peer_handshake() { my_addr.clone(), &p2p::handshake::Handshake::new(Hash::from_vec(&vec![]), p2p_config.clone(), None), net_adapter, - 100_000, None, server_inner, ) @@ -101,5 +100,5 @@ fn peer_handshake() { let server_peer = server.peers.get_connected_peer(my_addr).unwrap(); assert_eq!(server_peer.info.total_difficulty(), Difficulty::min()); - assert!(server.peers.peer_count() > 0); + assert!(server.peers.iter().connected().count() > 0); } diff --git a/p2p/tests/ser_deser.rs b/p2p/tests/ser_deser.rs index 1c8cee347c..7ed0549795 100644 --- a/p2p/tests/ser_deser.rs +++ b/p2p/tests/ser_deser.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
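Two properties make the assertions above and the ser_deser test below work: contains() is a subset test, with UNKNOWN as the empty set, and from_bits_truncate() drops any bits that do not correspond to defined flags. Reduced to raw masks (flag values as defined in types.rs):

    // contains(a, b): every bit of b is also set in a.
    // from_bits_truncate(bits): keep only bits backed by a defined flag.
    const ALL_DEFINED: u32 = 0b0111_1111; // the seven defined capability flags
    const DEFAULT_CAPS: u32 = 0b0011_1111; // default set: everything but BLOCK_HIST

    fn contains(set: u32, subset: u32) -> bool {
        set & subset == subset
    }

    fn from_bits_truncate(bits: u32) -> u32 {
        bits & ALL_DEFINED
    }

    fn main() {
        // UNKNOWN is the empty set (0), so any capability set contains it.
        assert!(contains(DEFAULT_CAPS, 0));
        // Disjoint flags are not contained.
        assert!(!contains(0b0000_0001, 0b0000_0100));
        // 0b111111 and 0b0111111 are the same number (63): exactly the default set.
        assert_eq!(from_bits_truncate(0b0111111), DEFAULT_CAPS);
        // Undefined high bits vanish under truncation.
        assert_eq!(from_bits_truncate(0b1000_0000 | DEFAULT_CAPS), DEFAULT_CAPS);
    }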
@@ -42,6 +42,8 @@ fn test_type_enum() { #[test] fn test_capabilities() { + let expected = p2p::types::Capabilities::default(); + assert_eq!( p2p::types::Capabilities::from_bits_truncate(0b00000000 as u32), p2p::types::Capabilities::UNKNOWN @@ -52,26 +54,16 @@ fn test_capabilities() { ); assert_eq!( - p2p::types::Capabilities::from_bits_truncate(0b11111 as u32), - p2p::types::Capabilities::FULL_NODE - ); - assert_eq!( - p2p::types::Capabilities::from_bits_truncate(0b00011111 as u32), - p2p::types::Capabilities::FULL_NODE - ); - assert_eq!( - p2p::types::Capabilities::from_bits_truncate(0b11111111 as u32), - p2p::types::Capabilities::FULL_NODE + expected, + p2p::types::Capabilities::from_bits_truncate(0b111111 as u32), ); + assert_eq!( - p2p::types::Capabilities::from_bits_truncate(0b01011111 as u32), - p2p::types::Capabilities::FULL_NODE + expected, + p2p::types::Capabilities::from_bits_truncate(0b0111111 as u32), ); - assert!( - p2p::types::Capabilities::from_bits_truncate(0b01011111 as u32) - .contains(p2p::types::Capabilities::FULL_NODE) - ); + assert!(p2p::types::Capabilities::from_bits_truncate(0b0111111 as u32).contains(expected)); assert!( p2p::types::Capabilities::from_bits_truncate(0b00101111 as u32) diff --git a/pool/Cargo.toml b/pool/Cargo.toml index bfe429a46a..9b4420dcac 100644 --- a/pool/Cargo.toml +++ b/pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grin_pool" -version = "4.4.2" +version = "5.3.2" authors = ["Grin Developers "] description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format." license = "Apache-2.0" @@ -14,15 +14,14 @@ blake2-rfc = "0.2" rand = "0.6" serde = "1" serde_derive = "1" +thiserror = "1" lru-cache = "0.1" log = "0.4" chrono = "0.4.11" -failure = "0.1" -failure_derive = "0.1" -grin_core = { path = "../core", version = "4.4.2" } -grin_keychain = { path = "../keychain", version = "4.4.2" } -grin_util = { path = "../util", version = "4.4.2" } +grin_core = { path = "../core", version = "5.3.2" } +grin_keychain = { path = "../keychain", version = "5.3.2" } +grin_util = { path = "../util", version = "5.3.2" } [dev-dependencies] -grin_chain = { path = "../chain", version = "4.4.2" } +grin_chain = { path = "../chain", version = "5.3.2" } diff --git a/pool/fuzz/Cargo.lock b/pool/fuzz/Cargo.lock index e679a69562..dc1c8895b3 100644 --- a/pool/fuzz/Cargo.lock +++ b/pool/fuzz/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
+version = 3 + [[package]] name = "addr2line" version = "0.13.0" @@ -35,9 +37,9 @@ dependencies = [ [[package]] name = "arbitrary" -version = "0.4.5" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cb544f1057eaaff4b34f8c4dcf56fc3cd04debd291998405d135017a7c3c0f4" +checksum = "5a7924531f38b1970ff630f03eb20a2fde69db5c590c93b0f3482e95dcc5fd60" [[package]] name = "arc-swap" @@ -45,12 +47,6 @@ version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" -[[package]] -name = "arrayref" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" - [[package]] name = "arrayvec" version = "0.3.25" @@ -100,7 +96,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" dependencies = [ "addr2line", - "cfg-if", + "cfg-if 0.1.10", "libc", "miniz_oxide", "object", @@ -109,29 +105,24 @@ dependencies = [ [[package]] name = "base64" -version = "0.9.3" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" -dependencies = [ - "byteorder", - "safemem", -] +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" [[package]] -name = "base64" -version = "0.12.3" +name = "base64ct" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" +checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" [[package]] name = "bindgen" -version = "0.52.0" +version = "0.56.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1c85344eb535a31b62f0af37be84441ba9e7f0f4111eb0530f43d15e513fe57" +checksum = "2da379dbebc0b76ef63ca68d8fc6e71c0f13e59432e0987e508c1820e6ab5239" dependencies = [ "bitflags 1.2.1", "cexpr", - "cfg-if", "clang-sys", "clap", "env_logger", @@ -139,7 +130,7 @@ dependencies = [ "lazycell", "log", "peeking_take_while", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", "regex", "rustc-hash", @@ -177,26 +168,25 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.3.3" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a076c298b9ecdb530ed9d967e74a6027d6a7478924520acddcddc24c1c8ab3ab" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "arrayref", - "byte-tools", + "generic-array", ] -[[package]] -name = "byte-tools" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "560c32574a12a89ecd91f5e742165893f86e3ab98d21f8ea548658eb9eef5f40" - [[package]] name = "byteorder" version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +[[package]] +name = "bytes" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" + [[package]] name = "cc" version = "1.0.58" @@ -205,9 +195,9 @@ checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" [[package]] name = "cexpr" -version = "0.3.6" +version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce5b5fb86b0c57c20c834c1b412fd09c77c8a59b9473f86272709e78874cd1d" +checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" dependencies = [ "nom", ] @@ -218,6 +208,12 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + [[package]] name = "chrono" version = "0.4.13" @@ -232,9 +228,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "0.28.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81de550971c976f176130da4b2978d3b524eaa0fd9ac31f3ceb5ae1231fb4853" +checksum = "5a050e2153c5be08febd6734e29298e844fdb0fa21aeddd63b4eb7baa106c69b" dependencies = [ "glob", "libc", @@ -271,31 +267,40 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "cpufeatures" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" +dependencies = [ + "libc", +] + [[package]] name = "crc32fast" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", ] [[package]] -name = "croaring-mw" -version = "0.4.5" +name = "croaring" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcdee571ce4bf3e49c382de29c38bd33b9fa871e1358c7749b9dcc5dc2776221" +checksum = "a00d14ad7d8cc067d7a5c93e8563791bfec3f7182361db955530db11d94ed63c" dependencies = [ "byteorder", - "croaring-sys-mw", + "croaring-sys", "libc", ] [[package]] -name = "croaring-sys-mw" -version = "0.4.5" +name = "croaring-sys" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea52c177269fa54c526b054dac8e623721de18143ebfd2ea84ffc023d6c271ee" +checksum = "c5d6a46501bb403a61e43bc7cd19977b4f9c54efd703949b00259cc61afb5a86" dependencies = [ "bindgen", "cc", @@ -304,19 +309,19 @@ dependencies = [ [[package]] name = "crypto-mac" -version = "0.6.2" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7afa06d05a046c7a47c3a849907ec303504608c927f4e85f7bfff22b7180d971" +checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ - "constant_time_eq", "generic-array", + "subtle", ] [[package]] name = "digest" -version = "0.7.6" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b072242a8cbaf9c145665af9d250c59af3b958f83ed6824e13533cf76d5b90" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ "generic-array", ] @@ -338,12 +343,12 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.7.1" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" dependencies = [ "atty", - "humantime", + "humantime 
2.1.0", "log", "regex", "termcolor", @@ -365,7 +370,7 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", "syn 1.0.34", "synstructure 0.12.4", @@ -383,7 +388,7 @@ version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68c90b0fc46cf89d227cc78b40e494ff81287a92dd07631e5af0d06fe3cf885e" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "crc32fast", "libc", "miniz_oxide", @@ -409,11 +414,12 @@ checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" [[package]] name = "generic-array" -version = "0.9.0" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef25c5683767570c2bbd7deba372926a55eaae9982d7726ee2a1050239d45b9d" +checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" dependencies = [ "typenum", + "version_check", ] [[package]] @@ -422,7 +428,7 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "wasi", ] @@ -441,16 +447,14 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "grin_chain" -version = "4.1.2-alpha.1" +version = "5.3.0" dependencies = [ "bit-vec", "bitflags 1.2.1", "byteorder", "chrono", - "croaring-mw", + "croaring", "enum_primitive", - "failure", - "failure_derive", "grin_core", "grin_keychain", "grin_store", @@ -460,19 +464,19 @@ dependencies = [ "lru-cache", "serde", "serde_derive", + "thiserror", ] [[package]] name = "grin_core" -version = "4.1.2-alpha.1" +version = "5.3.0" dependencies = [ "blake2-rfc", "byteorder", + "bytes", "chrono", - "croaring-mw", + "croaring", "enum_primitive", - "failure", - "failure_derive", "grin_keychain", "grin_util", "lazy_static", @@ -484,12 +488,13 @@ dependencies = [ "serde", "serde_derive", "siphasher", - "zeroize 1.1.0", + "thiserror", + "zeroize", ] [[package]] name = "grin_keychain" -version = "4.1.2-alpha.1" +version = "5.3.0" dependencies = [ "blake2-rfc", "byteorder", @@ -505,17 +510,15 @@ dependencies = [ "serde_derive", "serde_json", "sha2", - "zeroize 1.1.0", + "zeroize", ] [[package]] name = "grin_pool" -version = "4.1.2-alpha.1" +version = "5.3.0" dependencies = [ "blake2-rfc", "chrono", - "failure", - "failure_derive", "grin_core", "grin_keychain", "grin_util", @@ -523,6 +526,7 @@ dependencies = [ "rand 0.6.5", "serde", "serde_derive", + "thiserror", ] [[package]] @@ -540,9 +544,9 @@ dependencies = [ [[package]] name = "grin_secp256k1zkp" -version = "0.7.9" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c2e7431d1999f02112c2383c9d33e7a6212947abfba92c87ab7283ba667a8b" +checksum = "3af3c4c4829b3e2e7ee1d9a542833e4244912fbb887fabe44682558159b068a7" dependencies = [ "arrayvec 0.3.25", "cc", @@ -551,17 +555,15 @@ dependencies = [ "rustc-serialize", "serde", "serde_json", - "zeroize 0.9.3", + "zeroize", ] [[package]] name = "grin_store" -version = "4.1.2-alpha.1" +version = "5.3.0" dependencies = [ "byteorder", - "croaring-mw", - "failure", - "failure_derive", + "croaring", "grin_core", "grin_util", "libc", @@ -571,14 +573,15 @@ dependencies = [ "serde", "serde_derive", "tempfile", + "thiserror", ] [[package]] name = "grin_util" -version = 
"4.1.2-alpha.1" +version = "5.3.0" dependencies = [ "backtrace", - "base64 0.12.3", + "base64", "byteorder", "grin_secp256k1zkp", "lazy_static", @@ -589,7 +592,7 @@ dependencies = [ "serde", "serde_derive", "walkdir", - "zeroize 1.1.0", + "zeroize", "zip", ] @@ -604,9 +607,9 @@ dependencies = [ [[package]] name = "hmac" -version = "0.6.3" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733e1b3ac906631ca01ebb577e9bb0f5e37a454032b9036b5eaea4013ed6f99a" +checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ "crypto-mac", "digest", @@ -621,6 +624,12 @@ dependencies = [ "quick-error", ] +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + [[package]] name = "itoa" version = "0.4.6" @@ -647,12 +656,13 @@ checksum = "a9f8082297d534141b30c8d39e9b1773713ab50fdbe4ff30f750d063b3bfd701" [[package]] name = "libfuzzer-sys" -version = "0.3.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d718794b8e23533b9069bd2c4597d69e41cc7ab1c02700a502971aca0cdcf24" +checksum = "336244aaeab6a12df46480dc585802aa743a72d66b11937844c61bbca84c991d" dependencies = [ "arbitrary", "cc", + "once_cell", ] [[package]] @@ -667,11 +677,11 @@ dependencies = [ [[package]] name = "libloading" -version = "0.5.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" +checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" dependencies = [ - "cc", + "cfg-if 1.0.0", "winapi", ] @@ -708,7 +718,7 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "serde", ] @@ -728,7 +738,7 @@ dependencies = [ "chrono", "flate2", "fnv", - "humantime", + "humantime 1.3.0", "libc", "log", "log-mdc", @@ -785,9 +795,9 @@ checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" [[package]] name = "nom" -version = "4.2.3" +version = "5.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" +checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" dependencies = [ "memchr", "version_check", @@ -891,6 +901,18 @@ version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4eae0151b9dacf24fcc170d9995e511669a082856a91f958a2fe380bfab3fb22" +[[package]] +name = "once_cell" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" + +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + [[package]] name = "ordered-float" version = "1.1.0" @@ -916,7 +938,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "cloudabi", "libc", "redox_syscall", @@ -925,18 +947,26 @@ dependencies = [ ] [[package]] -name = "pbkdf2" +name = 
"password-hash" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09cddfbfc98de7f76931acf44460972edb4023eb14d0c6d4018800e552d8e0" +checksum = "77e0b28ace46c5a396546bcf443bf422b57049617433d8854227352a4a9b24e7" dependencies = [ - "base64 0.9.3", - "byteorder", - "constant_time_eq", + "base64ct", + "rand_core 0.6.3", + "subtle", +] + +[[package]] +name = "pbkdf2" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" +dependencies = [ + "base64ct", "crypto-mac", - "generic-array", "hmac", - "rand 0.5.6", + "password-hash", "sha2", ] @@ -946,12 +976,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" -[[package]] -name = "podio" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b18befed8bc2b61abc79a457295e7e838417326da1586050b919414073977f19" - [[package]] name = "ppv-lite86" version = "0.2.8" @@ -960,20 +984,11 @@ checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" [[package]] name = "proc-macro2" -version = "0.4.30" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" +checksum = "dd96a1e8ed2596c337f8eae5f24924ec83f5ad5ab21ea8e455d3566c69fbcaf7" dependencies = [ - "unicode-xid 0.1.0", -] - -[[package]] -name = "proc-macro2" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" -dependencies = [ - "unicode-xid 0.2.1", + "unicode-ident", ] [[package]] @@ -982,22 +997,13 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = "quote" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -dependencies = [ - "proc-macro2 0.4.30", -] - [[package]] name = "quote" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2", ] [[package]] @@ -1089,6 +1095,12 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rand_core" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" + [[package]] name = "rand_hc" version = "0.1.0" @@ -1204,13 +1216,13 @@ dependencies = [ [[package]] name = "ripemd160" -version = "0.7.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "482aa56cc68aaeccdaaff1cc5a72c247da8bbad3beb174ca5741f274c22883fb" +checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" dependencies = [ "block-buffer", - "byte-tools", "digest", + "opaque-debug", ] [[package]] @@ -1237,12 +1249,6 @@ version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" -[[package]] -name = "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = "same-file" version = "1.0.6" @@ -1267,6 +1273,12 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "httpdate" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" + [[package]] name = "serde-value" version = "0.6.0" @@ -1283,7 +1295,7 @@ version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", "syn 1.0.34", ] @@ -1313,14 +1325,15 @@ dependencies = [ [[package]] name = "sha2" -version = "0.7.1" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eb6be24e4c23a84d7184280d2722f7f2731fcdd4a9d886efbfe4413e4847ea0" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer", - "byte-tools", + "cfg-if 1.0.0", + "cpufeatures", "digest", - "fake-simd", + "opaque-debug", ] [[package]] @@ -1366,11 +1379,11 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.34" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cae2873c940d92e697597c5eee105fb570cd5689c695806f672883653349b" +checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", "unicode-xid 0.2.1", ] @@ -1393,7 +1406,7 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", "syn 1.0.34", "unicode-xid 0.2.1", @@ -1405,7 +1418,7 @@ version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "rand 0.7.3", "redox_syscall", @@ -1431,6 +1444,26 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "thiserror" +version = "1.0.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "thread-id" version = "3.3.0" @@ -1517,9 +1550,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.1.5" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" @@ -1587,34 +1620,13 @@ dependencies = [ "linked-hash-map", ] -[[package]] -name = "zeroize" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45af6a010d13e4cf5b54c94ba5a2b2eba5596b9e46bf5875612d332a1f2b3f86" -dependencies = [ - "zeroize_derive 0.9.3", -] - [[package]] name = "zeroize" 
version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" dependencies = [ - "zeroize_derive 1.0.0", -] - -[[package]] -name = "zeroize_derive" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080616bd0e31f36095288bb0acdf1f78ef02c2fa15527d7e993f2a6c7591643e" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "syn 0.15.44", - "synstructure 0.10.2", + "zeroize_derive", ] [[package]] @@ -1623,18 +1635,19 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.34", - "synstructure 0.12.4", + "proc-macro2", + "quote", + "syn", + "synstructure", ] [[package]] name = "zip" -version = "0.5.6" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58287c28d78507f5f91f2a4cf1e8310e2c76fd4c6932f93ac60fd1ceb402db7d" +checksum = "93ab48844d61251bb3835145c521d88aa4031d7139e8485990f60ca911fa0815" dependencies = [ + "byteorder", "crc32fast", - "podio", + "thiserror", ] diff --git a/pool/fuzz/Cargo.toml b/pool/fuzz/Cargo.toml index 0541050ba6..a10d61a7cf 100644 --- a/pool/fuzz/Cargo.toml +++ b/pool/fuzz/Cargo.toml @@ -5,12 +5,9 @@ authors = ["Automatically generated"] publish = false edition = "2018" -[package.metadata] -cargo-fuzz = true - [dependencies] chrono = "0.4.11" -libfuzzer-sys = "0.3" +libfuzzer-sys = "0.4.0" grin_chain = { path = "../../chain" } grin_core = { path = "../../core" } grin_keychain = { path = "../../keychain" } diff --git a/pool/fuzz/fuzz_targets/common.rs b/pool/fuzz/fuzz_targets/common.rs index d5e01f9185..480ecabacd 100644 --- a/pool/fuzz/fuzz_targets/common.rs +++ b/pool/fuzz/fuzz_targets/common.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -18,11 +18,10 @@ use self::chain::types::{NoopAdapter, Options}; use self::chain::Chain; use self::core::consensus; use self::core::core::hash::Hash; -use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache}; use self::core::core::{Block, BlockHeader, BlockSums, KernelFeatures, Transaction}; use self::core::genesis; use self::core::global; -use self::core::libtx::{build, reward, ProofBuilder}; +use self::core::libtx::{build, reward, ProofBuilder, DEFAULT_BASE_FEE}; use self::core::pow; use self::keychain::{ExtKeychain, ExtKeychainPath, Keychain}; use self::pool::types::*; @@ -91,8 +90,8 @@ impl BlockChain for ChainAdapter { fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> { self.chain.validate_tx(tx).map_err(|e| match e.kind() { - chain::ErrorKind::Transaction(txe) => txe.into(), - chain::ErrorKind::NRDRelativeHeight => PoolError::NRDKernelRelativeHeight, + chain::Error::Transaction(txe) => txe.into(), + chain::Error::NRDRelativeHeight => PoolError::NRDKernelRelativeHeight, _ => PoolError::Other("failed to validate tx".into()), }) } @@ -120,7 +119,7 @@ pub fn clean_output_dir(db_root: String) { pub struct PoolFuzzer { pub chain: Arc, pub keychain: ExtKeychain, - pub pool: TransactionPool, + pub pool: TransactionPool, } impl PoolFuzzer { @@ -131,15 +130,11 @@ impl PoolFuzzer { let genesis = genesis_block(&keychain); let chain = Arc::new(Self::init_chain(db_root, genesis)); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); // Initialize a new pool with our chain adapter. - let pool = Self::init_transaction_pool( - Arc::new(ChainAdapter { - chain: chain.clone(), - }), - verifier_cache.clone(), - ); + let pool = Self::init_transaction_pool(Arc::new(ChainAdapter { + chain: chain.clone(), + })); let ret = Self { chain, @@ -239,30 +234,24 @@ impl PoolFuzzer { } fn init_chain(dir_name: &str, genesis: Block) -> Chain { - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); Chain::init( dir_name.to_string(), Arc::new(NoopAdapter {}), genesis, pow::verify_size, - verifier_cache, false, ) .unwrap() } // Same as from pool/tests/common.rs - fn init_transaction_pool( - chain: Arc, - verifier_cache: Arc>, - ) -> TransactionPool + fn init_transaction_pool(chain: Arc) -> TransactionPool where B: BlockChain, - V: VerifierCache + 'static, { TransactionPool::new( PoolConfig { - accept_fee_base: 0, + accept_fee_base: DEFAULT_BASE_FEE, max_pool_size: 50, max_stempool_size: 50, mineable_max_weight: 10_000, diff --git a/pool/src/lib.rs b/pool/src/lib.rs index 790b3537eb..60a70da787 100644 --- a/pool/src/lib.rs +++ b/pool/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/pool/src/pool.rs b/pool/src/pool.rs index 2350975aaf..5930e2bf8b 100644 --- a/pool/src/pool.rs +++ b/pool/src/pool.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
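With failure gone, the adapter above matches chain errors directly as enum variants rather than going through kind(). The shape of that mapping, with simplified stand-in types:

    enum ChainError {
        Transaction(String),
        NRDRelativeHeight,
        Other(String),
    }

    #[derive(Debug, PartialEq)]
    enum PoolError {
        InvalidTx(String),
        NRDKernelRelativeHeight,
        Other(String),
    }

    // Previously e.kind() produced a separate ErrorKind to match on;
    // now the error enum itself is matched.
    fn to_pool_error(e: ChainError) -> PoolError {
        match e {
            ChainError::Transaction(txe) => PoolError::InvalidTx(txe),
            ChainError::NRDRelativeHeight => PoolError::NRDKernelRelativeHeight,
            _ => PoolError::Other("failed to validate tx".into()),
        }
    }

    fn main() {
        assert_eq!(
            to_pool_error(ChainError::NRDRelativeHeight),
            PoolError::NRDKernelRelativeHeight
        );
        let _ = to_pool_error(ChainError::Other(String::new()));
    }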
@@ -18,41 +18,36 @@ use self::core::core::hash::{Hash, Hashed}; use self::core::core::id::{ShortId, ShortIdentifiable}; use self::core::core::transaction; -use self::core::core::verifier_cache::VerifierCache; use self::core::core::{ Block, BlockHeader, BlockSums, Committed, OutputIdentifier, Transaction, TxKernel, Weighting, }; -use self::util::RwLock; use crate::types::{BlockChain, PoolEntry, PoolError}; use grin_core as core; use grin_util as util; use std::cmp::Reverse; use std::collections::{HashMap, HashSet}; use std::sync::Arc; +use util::static_secp_instance; -pub struct Pool +pub struct Pool where B: BlockChain, - V: VerifierCache, { /// Entries in the pool (tx + info + timer) in simple insertion order. pub entries: Vec, /// The blockchain pub blockchain: Arc, - pub verifier_cache: Arc>, pub name: String, } -impl Pool +impl Pool where B: BlockChain, - V: VerifierCache + 'static, { - pub fn new(chain: Arc, verifier_cache: Arc>, name: String) -> Self { + pub fn new(chain: Arc, name: String) -> Self { Pool { entries: vec![], blockchain: chain, - verifier_cache, name, } } @@ -160,7 +155,8 @@ where let tx = transaction::aggregate(&txs)?; // Validate the single aggregate transaction "as pool", not subject to tx weight limits. - tx.validate(Weighting::NoLimit, self.verifier_cache.clone())?; + let header = self.blockchain.chain_head()?; + tx.validate(Weighting::NoLimit, header.height)?; Ok(Some(tx)) } @@ -228,7 +224,7 @@ where ) -> Result { // Validate the tx, conditionally checking against weight limits, // based on weight verification type. - tx.validate(weighting, self.verifier_cache.clone())?; + tx.validate(weighting, header.height)?; // Validate the tx against current chain state. // Check all inputs are in the current UTXO set. @@ -303,9 +299,13 @@ where tx: &Transaction, header: &BlockHeader, ) -> Result { - let overage = tx.overage(); + let overage = tx.overage(header.height); - let offset = header.total_kernel_offset().add(&tx.offset)?; + let offset = { + let secp = static_secp_instance(); + let secp = secp.lock(); + header.total_kernel_offset().add(&tx.offset, &secp) + }?; let block_sums = self.blockchain.get_block_sums(&header.hash())?; @@ -335,19 +335,19 @@ where // Use our bucket logic to identify the best transaction for eviction and evict it. // We want to avoid evicting a transaction where another transaction depends on it. - // We want to evict a transaction with low fee_to_weight. + // We want to evict a transaction with low fee_rate. pub fn evict_transaction(&mut self) { if let Some(evictable_transaction) = self.bucket_transactions(Weighting::NoLimit).last() { self.entries.retain(|x| x.tx != *evictable_transaction); }; } - /// Buckets consist of a vec of txs and track the aggregate fee_to_weight. + /// Buckets consist of a vec of txs and track the aggregate fee_rate. /// We aggregate (cut-through) dependent transactions within a bucket *unless* adding a tx - /// would reduce the aggregate fee_to_weight, in which case we start a new bucket. - /// Note this new bucket will by definition have a lower fee_to_weight than the bucket + /// would reduce the aggregate fee_rate, in which case we start a new bucket. + /// Note this new bucket will by definition have a lower fee_rate than the bucket /// containing the tx it depends on. - /// Sorting the buckets by fee_to_weight will therefore preserve dependency ordering, + /// Sorting the buckets by fee_rate will therefore preserve dependency ordering, /// maximizing both cut-through and overall fees. 
fn bucket_transactions(&self, weighting: Weighting) -> Vec { let mut tx_buckets: Vec = Vec::new(); @@ -389,13 +389,14 @@ where continue; } + let height = self.blockchain.chain_head().map(|x| x.height).unwrap_or(0); match insert_pos { None => { // No parent tx, just add to the end in its own bucket. // This is the common case for non 0-conf txs in the txpool. // We assume the tx is valid here as we validated it on the way into the txpool. insert_pos = Some(tx_buckets.len()); - tx_buckets.push(Bucket::new(entry.tx.clone(), tx_buckets.len())); + tx_buckets.push(Bucket::new(entry.tx.clone(), tx_buckets.len(), height)); } Some(pos) => { // We found a single parent tx, so aggregate in the bucket // Otherwise discard and let the next block pick this tx up. let bucket = &tx_buckets[pos]; - if let Ok(new_bucket) = bucket.aggregate_with_tx( - entry.tx.clone(), - weighting, - self.verifier_cache.clone(), - ) { - if new_bucket.fee_to_weight >= bucket.fee_to_weight { - // Only aggregate if it would not reduce the fee_to_weight ratio. + if let Ok(new_bucket) = + bucket.aggregate_with_tx(entry.tx.clone(), weighting, height) + { + if new_bucket.fee_rate >= bucket.fee_rate { + // Only aggregate if it would not reduce the fee_rate ratio. tx_buckets[pos] = new_bucket; } else { // Otherwise put it in its own bucket at the end. - // Note: This bucket will have a lower fee_to_weight + // Note: This bucket will have a lower fee_rate // than the bucket it depends on. - tx_buckets.push(Bucket::new(entry.tx.clone(), tx_buckets.len())); + tx_buckets.push(Bucket::new( + entry.tx.clone(), + tx_buckets.len(), + height, + )); } } else { // Aggregation failed so discard this new tx. @@ -437,11 +440,11 @@ where } } - // Sort buckets by fee_to_weight (descending) and age (oldest first). - // Txs with highest fee_to_weight will be prioritied. - // Aggregation that increases the fee_to_weight of a bucket will prioritize the bucket. + // Sort buckets by fee_rate (descending) and age (oldest first). + // Txs with highest fee_rate will be prioritized. + // Aggregation that increases the fee_rate of a bucket will prioritize the bucket. // Oldest (based on pool insertion time) will then be prioritized. - tx_buckets.sort_unstable_by_key(|x| (Reverse(x.fee_to_weight), x.age_idx)); + tx_buckets.sort_unstable_by_key(|x| (Reverse(x.fee_rate), x.age_idx)); tx_buckets.into_iter().flat_map(|x| x.raw_txs).collect() } @@ -499,18 +502,18 @@ where struct Bucket { raw_txs: Vec, - fee_to_weight: u64, + fee_rate: u64, age_idx: usize, } impl Bucket { /// Construct a new bucket with the given tx. /// also specifies an "age_idx" so we can sort buckets by age - /// as well as fee_to_weight. Txs are maintainedin the pool in insert order + /// as well as fee_rate. Txs are maintained in the pool in insert order /// so buckets with low age_idx contain oldest txs.
- fn new(tx: Transaction, age_idx: usize) -> Bucket { + fn new(tx: Transaction, age_idx: usize, height: u64) -> Bucket { Bucket { - fee_to_weight: tx.fee_to_weight(), + fee_rate: tx.fee_rate(height), raw_txs: vec![tx], age_idx, } @@ -520,14 +523,14 @@ impl Bucket { &self, new_tx: Transaction, weighting: Weighting, - verifier_cache: Arc>, + height: u64, ) -> Result { let mut raw_txs = self.raw_txs.clone(); raw_txs.push(new_tx); let agg_tx = transaction::aggregate(&raw_txs)?; - agg_tx.validate(weighting, verifier_cache)?; + agg_tx.validate(weighting, height)?; Ok(Bucket { - fee_to_weight: agg_tx.fee_to_weight(), + fee_rate: agg_tx.fee_rate(height), raw_txs: raw_txs, age_idx: self.age_idx, }) diff --git a/pool/src/transaction_pool.rs b/pool/src/transaction_pool.rs index f3b5de75a5..1a9c326334 100644 --- a/pool/src/transaction_pool.rs +++ b/pool/src/transaction_pool.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,7 +19,6 @@ use self::core::core::hash::{Hash, Hashed}; use self::core::core::id::ShortId; -use self::core::core::verifier_cache::VerifierCache; use self::core::core::{ transaction, Block, BlockHeader, HeaderVersion, OutputIdentifier, Transaction, Weighting, }; @@ -37,53 +36,40 @@ use std::collections::VecDeque; use std::sync::Arc; /// Transaction pool implementation. -pub struct TransactionPool +pub struct TransactionPool where B: BlockChain, P: PoolAdapter, - V: VerifierCache, { /// Pool Config pub config: PoolConfig, /// Our transaction pool. - pub txpool: Pool, + pub txpool: Pool, /// Our Dandelion "stempool". - pub stempool: Pool, + pub stempool: Pool, /// Cache of previous txs in case of a re-org. pub reorg_cache: Arc>>, /// The blockchain pub blockchain: Arc, - pub verifier_cache: Arc>, /// The pool adapter pub adapter: Arc
<P>, /// the replay attack cache pub replay_verifier_cache: Arc>>, } -impl TransactionPool<B, P, V> +impl TransactionPool<B, P> where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { /// Create a new transaction pool - pub fn new( - config: PoolConfig, - chain: Arc<B>, - verifier_cache: Arc<RwLock<V>>, - adapter: Arc
<P>, - ) -> Self { + pub fn new(config: PoolConfig, chain: Arc<B>, adapter: Arc<P>
) -> Self { TransactionPool { config, - txpool: Pool::new(chain.clone(), verifier_cache.clone(), "txpool".to_string()), - stempool: Pool::new( - chain.clone(), - verifier_cache.clone(), - "stempool".to_string(), - ), + txpool: Pool::new(chain.clone(), "txpool".to_string()), + stempool: Pool::new(chain.clone(), "stempool".to_string()), reorg_cache: Arc::new(RwLock::new(VecDeque::new())), blockchain: chain, - verifier_cache, adapter, replay_verifier_cache: Arc::new(RwLock::new(LruCache::new(100))), } @@ -112,6 +98,7 @@ where if cache.len() > self.config.max_pool_size { let _ = cache.pop_front(); } + debug!("added tx to reorg_cache: size now {}", cache.len()); } // Deaggregate this tx against the txpool. @@ -187,7 +174,7 @@ where // NRD kernels only valid post HF3 and if NRD feature enabled. self.verify_kernel_variants(tx, header)?; - // Do we have the capacity to accept this transaction? + // Does this transaction pay the required fees and fit within the pool capacity? let acceptability = self.is_acceptable(tx, stem); let mut evict = false; if !stem && acceptability.as_ref().err() == Some(&PoolError::OverCapacity) { @@ -198,7 +185,7 @@ where // Make sure the transaction is valid before anything else. // Validate tx accounting for max tx weight. - tx.validate(Weighting::AsTransaction, self.verifier_cache.clone()) + tx.validate(Weighting::AsTransaction, header.height) .map_err(PoolError::InvalidTx)?; // Check the tx lock_time is valid based on current chain state. @@ -298,14 +285,15 @@ where }; // Validate the tx to ensure our converted inputs are correct. - tx.validate(Weighting::AsTransaction, self.verifier_cache.clone())?; + let header = self.chain_head()?; + tx.validate(Weighting::AsTransaction, header.height)?; Ok(PoolEntry::new(tx, entry.src)) } // Evict a transaction from the txpool. // Uses bucket logic to identify the "last" transaction. - // No other tx depends on it and it has low fee_to_weight. + // No other tx depends on it and it has low fee_rate pub fn evict_from_txpool(&mut self) { self.txpool.evict_transaction() } @@ -422,14 +410,12 @@ where return Err(PoolError::OverCapacity); } - // for a basic transaction (1 input, 2 outputs) - - // (-1 * 1) + (4 * 2) + 1 = 8 - // 8 * 10 = 80 - if self.config.accept_fee_base > 0 { - let threshold = (tx.tx_weight() as u64) * self.config.accept_fee_base; - if tx.fee() < threshold { - return Err(PoolError::LowFeeTransaction(threshold)); - } + // weight for a basic transaction (2 inputs, 2 outputs, 1 kernel) - + // (2 * 1) + (2 * 21) + (1 * 3) = 47 + // minfees = 47 * 500_000 = 23_500_000 + let header = self.chain_head()?; + if tx.shifted_fee(header.height) < tx.accept_fee(header.height) { + return Err(PoolError::LowFeeTransaction(tx.shifted_fee(header.height))); } Ok(()) } diff --git a/pool/src/types.rs b/pool/src/types.rs index cbe8c410a5..d6adaeba73 100644 --- a/pool/src/types.rs +++ b/pool/src/types.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
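The floor above replaces the old flat threshold: the minimum accepted fee is now proportional to transaction weight. Using the weights from the comment (input 1, output 21, kernel 3) and the 500,000 fee base it quotes, the arithmetic checks out as follows:

    const INPUT_WEIGHT: u64 = 1;
    const OUTPUT_WEIGHT: u64 = 21;
    const KERNEL_WEIGHT: u64 = 3;
    const DEFAULT_ACCEPT_FEE_BASE: u64 = 500_000; // nanogrin per unit of weight

    // Minimum fee the pool will accept for a tx of the given shape.
    fn accept_fee(inputs: u64, outputs: u64, kernels: u64) -> u64 {
        let weight = inputs * INPUT_WEIGHT + outputs * OUTPUT_WEIGHT + kernels * KERNEL_WEIGHT;
        weight * DEFAULT_ACCEPT_FEE_BASE
    }

    fn main() {
        // 2 inputs + 2 outputs + 1 kernel: (2*1) + (2*21) + (1*3) = 47 weight units,
        // so the floor is 47 * 500_000 = 23_500_000 nanogrin.
        assert_eq!(accept_fee(2, 2, 1), 23_500_000);
        // A single output (weight 21) costs 10_500_000 nanogrin, about one Grin-cent.
        assert_eq!(accept_fee(0, 1, 0), 10_500_000);
    }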
@@ -21,8 +21,8 @@ use self::core::core::committed; use self::core::core::hash::Hash; use self::core::core::transaction::{self, Transaction}; use self::core::core::{BlockHeader, BlockSums, Inputs, OutputIdentifier}; +use self::core::global::DEFAULT_ACCEPT_FEE_BASE; use chrono::prelude::*; -use failure::Fail; use grin_core as core; use grin_keychain as keychain; @@ -138,8 +138,9 @@ impl Default for PoolConfig { } } -fn default_accept_fee_base() -> u64 { - consensus::MILLI_GRIN +/// make output (of weight 21) cost about 1 Grin-cent by default, keeping a round number +pub fn default_accept_fee_base() -> u64 { + DEFAULT_ACCEPT_FEE_BASE } fn default_max_pool_size() -> usize { 150_000 @@ -202,56 +203,56 @@ impl TxSource { } /// Possible errors when interacting with the transaction pool. -#[derive(Debug, Fail, PartialEq)] +#[derive(Debug, thiserror::Error, PartialEq)] pub enum PoolError { /// An invalid pool entry caused by underlying tx validation error - #[fail(display = "Tx Pool Invalid Tx {}", _0)] + #[error("Tx Pool Invalid Tx {0}")] InvalidTx(transaction::Error), /// An invalid pool entry caused by underlying block validation error - #[fail(display = "Tx Pool Invalid Block {}", _0)] + #[error("Tx Pool Invalid Block {0}")] InvalidBlock(block::Error), /// Underlying keychain error. - #[fail(display = "Tx Pool Keychain error {}", _0)] + #[error("Tx Pool Keychain error {0}")] Keychain(keychain::Error), /// Underlying "committed" error. - #[fail(display = "Tx Pool Committed error {}", _0)] + #[error("Tx Pool Committed error {0}")] Committed(committed::Error), /// Attempt to add a transaction to the pool with lock_height /// greater than height of current block - #[fail(display = "Tx Pool Immature transaction")] + #[error("Tx Pool Immature transaction")] ImmatureTransaction, /// Attempt to spend a coinbase output before it has sufficiently matured. - #[fail(display = "Tx Pool Immature coinbase")] + #[error("Tx Pool Immature coinbase")] ImmatureCoinbase, /// Problem propagating a stem tx to the next Dandelion relay node. - #[fail(display = "Tx Pool Dandelion error")] + #[error("Tx Pool Dandelion error")] DandelionError, /// Transaction pool is over capacity, can't accept more transactions - #[fail(display = "Tx Pool Over capacity")] + #[error("Tx Pool Over capacity")] OverCapacity, /// Transaction fee is too low given its weight - #[fail(display = "Tx Pool Low fee transaction {}", _0)] + #[error("Tx Pool Low fee transaction {0}")] LowFeeTransaction(u64), /// Attempt to add a duplicate output to the pool. - #[fail(display = "Tx Pool Duplicate commitment")] + #[error("Tx Pool Duplicate commitment")] DuplicateCommitment, /// Attempt to add a duplicate kernel or output duplicate to spent to the pool. - #[fail(display = "Tx Pool Duplicate kernel or duplicate output to spent")] - DuplicateKernelOrDuplicateSpent, + #[error("Tx Pool Duplicate kernel or duplicate output to spent, {0}")] + DuplicateKernelOrDuplicateSpent(String), /// Attempt to add a duplicate tx to the pool. - #[fail(display = "Tx Pool Duplicate tx")] + #[error("Tx Pool Duplicate tx")] DuplicateTx, /// NRD kernels will not be accepted by the txpool/stempool pre-HF3. - #[fail(display = "NRD kernel pre-HF3")] + #[error("NRD kernel pre-HF3")] NRDKernelPreHF3, /// NRD kernels are not valid if disabled locally via "feature flag". - #[fail(display = "NRD kernel not enabled")] + #[error("NRD kernel not enabled")] NRDKernelNotEnabled, /// NRD kernels are not valid if relative_height rule not met. 
- #[fail(display = "NRD kernel relative height")] + #[error("NRD kernel relative height")] NRDKernelRelativeHeight, /// Other kinds of error (not yet pulled out into meaningful errors). - #[fail(display = "Tx Pool General error {}", _0)] + #[error("Tx Pool General error {0}")] Other(String), } diff --git a/pool/tests/block_building.rs b/pool/tests/block_building.rs index b9d8c70d3d..c09bf506ac 100644 --- a/pool/tests/block_building.rs +++ b/pool/tests/block_building.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,11 +15,9 @@ pub mod common; use self::core::core::hash::Hashed; -use self::core::core::verifier_cache::LruVerifierCache; use self::core::global; use self::keychain::{ExtKeychain, Keychain}; use self::pool::PoolError; -use self::util::RwLock; use crate::common::*; use grin_core as core; use grin_keychain as keychain; @@ -31,6 +29,7 @@ use std::sync::Arc; fn test_transaction_pool_block_building() -> Result<(), PoolError> { util::init_test_logger(); global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + global::set_local_accept_fee_base(1); let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); let db_root = "target/.block_building"; @@ -38,35 +37,33 @@ fn test_transaction_pool_block_building() -> Result<(), PoolError> { let genesis = genesis_block(&keychain); let chain = Arc::new(init_chain(db_root, genesis)); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); // Initialize a new pool with our chain adapter. - let mut pool = init_transaction_pool( - Arc::new(ChainAdapter { - chain: chain.clone(), - }), - verifier_cache, - ); + let mut pool = init_transaction_pool(Arc::new(ChainAdapter { + chain: chain.clone(), + })); - add_some_blocks(&chain, 3, &keychain); + // mine enough blocks to get past HF4 + add_some_blocks(&chain, 4 * 3, &keychain); let header_1 = chain.get_header_by_height(1).unwrap(); // Now create tx to spend an early coinbase (now matured). // Provides us with some useful outputs to test with. - let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]); + let initial_tx = + test_transaction_spending_coinbase(&keychain, &header_1, vec![100, 200, 300, 400]); // Mine that initial tx so we can spend it with multiple txs. add_block(&chain, &[initial_tx], &keychain); let header = chain.head_header().unwrap(); - let root_tx_1 = test_transaction(&keychain, vec![10, 20], vec![24]); - let root_tx_2 = test_transaction(&keychain, vec![30], vec![28]); - let root_tx_3 = test_transaction(&keychain, vec![40], vec![38]); + let root_tx_1 = test_transaction(&keychain, vec![100, 200], vec![240]); + let root_tx_2 = test_transaction(&keychain, vec![300], vec![270]); + let root_tx_3 = test_transaction(&keychain, vec![400], vec![370]); - let child_tx_1 = test_transaction(&keychain, vec![24], vec![22]); - let child_tx_2 = test_transaction(&keychain, vec![38], vec![32]); + let child_tx_1 = test_transaction(&keychain, vec![240], vec![210]); + let child_tx_2 = test_transaction(&keychain, vec![370], vec![320]); { // Add the three root txs to the pool. 
diff --git a/pool/tests/block_max_weight.rs b/pool/tests/block_max_weight.rs index 40747fbfa9..68e3322fe2 100644 --- a/pool/tests/block_max_weight.rs +++ b/pool/tests/block_max_weight.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,10 +16,8 @@ pub mod common; use self::core::core::hash::Hashed; -use self::core::core::verifier_cache::LruVerifierCache; use self::core::global; use self::keychain::{ExtKeychain, Keychain}; -use self::util::RwLock; use crate::common::*; use grin_core as core; use grin_keychain as keychain; @@ -30,6 +28,7 @@ use std::sync::Arc; fn test_block_building_max_weight() { util::init_test_logger(); global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + global::set_local_accept_fee_base(1); let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); @@ -38,24 +37,24 @@ fn test_block_building_max_weight() { let genesis = genesis_block(&keychain); let chain = Arc::new(init_chain(db_root, genesis)); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); // Initialize a new pool with our chain adapter. - let mut pool = init_transaction_pool( - Arc::new(ChainAdapter { - chain: chain.clone(), - }), - verifier_cache, - ); + let mut pool = init_transaction_pool(Arc::new(ChainAdapter { + chain: chain.clone(), + })); - add_some_blocks(&chain, 3, &keychain); + // mine past HF4 to see effect of set_local_accept_fee_base + add_some_blocks(&chain, 4 * 3, &keychain); let header_1 = chain.get_header_by_height(1).unwrap(); // Now create tx to spend an early coinbase (now matured). // Provides us with some useful outputs to test with. - let initial_tx = - test_transaction_spending_coinbase(&keychain, &header_1, vec![100, 200, 300, 1000]); + let initial_tx = test_transaction_spending_coinbase( + &keychain, + &header_1, + vec![1_000_000, 2_000_000, 3_000_000, 10_000_000], + ); // Mine that initial tx so we can spend it with multiple txs. add_block(&chain, &[initial_tx], &keychain); @@ -65,26 +64,32 @@ fn test_block_building_max_weight() { // Build some dependent txs to add to the txpool. // We will build a block from a subset of these. let txs = vec![ - test_transaction(&keychain, vec![1000], vec![390, 130, 120, 110]), - test_transaction(&keychain, vec![100], vec![90, 1]), - test_transaction(&keychain, vec![90], vec![80, 2]), - test_transaction(&keychain, vec![200], vec![199]), - test_transaction(&keychain, vec![300], vec![290, 3]), - test_transaction(&keychain, vec![290], vec![280, 4]), + test_transaction( + &keychain, + vec![10_000_000], + vec![3_900_000, 1_300_000, 1_200_000, 1_100_000], + ), + test_transaction(&keychain, vec![1_000_000], vec![900_000, 10_000]), + test_transaction(&keychain, vec![900_000], vec![800_000, 20_000]), + test_transaction(&keychain, vec![2_000_000], vec![1_970_000]), + test_transaction(&keychain, vec![3_000_000], vec![2_900_000, 30_000]), + test_transaction(&keychain, vec![2_900_000], vec![2_800_000, 40_000]), ]; // Fees and weights of our original txs in insert order. 
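The fee_rate values asserted immediately below are consistent with plain integer division of total fee by transaction weight. A quick self-contained check, under the assumption that at the heights used here the Transaction method of the same name reduces to exactly that division:

    // fee_rate as integer division of fee by weight.
    fn fee_rate(fee: u64, weight: u64) -> u64 {
        fee / weight
    }

    fn main() {
        let fees = [2_500_000u64, 90_000, 80_000, 30_000, 70_000, 60_000];
        let weights = [88u64, 46, 46, 25, 46, 46];
        let rates: Vec<u64> = fees
            .iter()
            .zip(weights.iter())
            .map(|(f, w)| fee_rate(*f, *w))
            .collect();
        assert_eq!(rates, [28409, 1956, 1739, 1200, 1521, 1304]);
    }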
assert_eq!( - txs.iter().map(|x| x.fee()).collect::<Vec<_>>(), - [250, 9, 8, 1, 7, 6] + txs.iter().map(|x| x.fee(header.height)).collect::<Vec<_>>(), + [2_500_000, 90_000, 80_000, 30_000, 70_000, 60_000] ); assert_eq!( - txs.iter().map(|x| x.tx_weight()).collect::<Vec<_>>(), - [16, 8, 8, 4, 8, 8] + txs.iter().map(|x| x.weight()).collect::<Vec<_>>(), + [88, 46, 46, 25, 46, 46] ); assert_eq!( - txs.iter().map(|x| x.fee_to_weight()).collect::<Vec<_>>(), - [15625, 1125, 1000, 250, 875, 750] + txs.iter() + .map(|x| x.fee_rate(header.height)) + .collect::<Vec<_>>(), + [28409, 1956, 1739, 1200, 1521, 1304] ); // Populate our txpool with the txs. @@ -101,16 +106,18 @@ fn test_block_building_max_weight() { // Fees and weights of the "mineable" txs. assert_eq!( - txs.iter().map(|x| x.fee()).collect::<Vec<_>>(), - [250, 9, 8, 7] + txs.iter().map(|x| x.fee(header.height)).collect::<Vec<_>>(), + [2_500_000, 90_000, 80_000, 70_000] ); assert_eq!( - txs.iter().map(|x| x.tx_weight()).collect::<Vec<_>>(), - [16, 8, 8, 8] + txs.iter().map(|x| x.weight()).collect::<Vec<_>>(), + [88, 46, 46, 46] ); assert_eq!( - txs.iter().map(|x| x.fee_to_weight()).collect::<Vec<_>>(), - [15625, 1125, 1000, 875] + txs.iter() + .map(|x| x.fee_rate(header.height)) + .collect::<Vec<_>>(), + [28409, 1956, 1739, 1521] ); add_block(&chain, &txs, &keychain); diff --git a/pool/tests/block_reconciliation.rs b/pool/tests/block_reconciliation.rs index 0250a55812..f0c0837dc5 100644 --- a/pool/tests/block_reconciliation.rs +++ b/pool/tests/block_reconciliation.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,10 +15,8 @@ pub mod common; use self::core::core::hash::Hashed; -use self::core::core::verifier_cache::LruVerifierCache; use self::core::global; use self::keychain::{ExtKeychain, Keychain}; -use self::util::RwLock; use crate::common::ChainAdapter; use crate::common::*; use grin_core as core; @@ -30,6 +28,7 @@ use std::sync::Arc; fn test_transaction_pool_block_reconciliation() { util::init_test_logger(); global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + global::set_local_accept_fee_base(1); let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); let db_root = "target/.block_reconciliation"; @@ -37,23 +36,21 @@ let genesis = genesis_block(&keychain); let chain = Arc::new(init_chain(db_root, genesis)); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); // Initialize a new pool with our chain adapter. - let mut pool = init_transaction_pool( - Arc::new(ChainAdapter { - chain: chain.clone(), - }), - verifier_cache, - ); + let mut pool = init_transaction_pool(Arc::new(ChainAdapter { + chain: chain.clone(), + })); - add_some_blocks(&chain, 3, &keychain); + // mine past HF4 to see effect of set_local_accept_fee_base + add_some_blocks(&chain, 4 * 3, &keychain); let header_1 = chain.get_header_by_height(1).unwrap(); // Now create tx to spend an early coinbase (now matured). // Provides us with some useful outputs to test with. - let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]); + let initial_tx = + test_transaction_spending_coinbase(&keychain, &header_1, vec![1_000, 2_000, 3_000, 4_000]); // Mine that initial tx so we can spend it with multiple txs. add_block(&chain, &[initial_tx], &keychain); @@ -66,34 +63,34 @@ fn test_transaction_pool_block_reconciliation() { // 2.
A transaction that should be invalidated because the input is // consumed in the block, although it is not exactly consumed. // 3. A transaction that should remain after block reconciliation. - let block_transaction = test_transaction(&keychain, vec![10], vec![8]); - let conflict_transaction = test_transaction(&keychain, vec![20], vec![12, 6]); - let valid_transaction = test_transaction(&keychain, vec![30], vec![13, 15]); + let block_transaction = test_transaction(&keychain, vec![1_000], vec![800]); + let conflict_transaction = test_transaction(&keychain, vec![2_000], vec![1_200, 600]); + let valid_transaction = test_transaction(&keychain, vec![3_000], vec![1_300, 1_500]); // We will also introduce a few children: // 4. A transaction that descends from transaction 1, that is in // turn exactly contained in the block. - let block_child = test_transaction(&keychain, vec![8], vec![5, 1]); + let block_child = test_transaction(&keychain, vec![800], vec![500, 100]); // 5. A transaction that descends from transaction 4, that is not // contained in the block at all and should be valid after // reconciliation. - let pool_child = test_transaction(&keychain, vec![5], vec![3]); + let pool_child = test_transaction(&keychain, vec![500], vec![300]); // 6. A transaction that descends from transaction 2 that does not // conflict with anything in the block in any way, but should be // invalidated (orphaned). - let conflict_child = test_transaction(&keychain, vec![12], vec![2]); + let conflict_child = test_transaction(&keychain, vec![1_200], vec![200]); // 7. A transaction that descends from transaction 2 that should be // valid due to its inputs being satisfied by the block. - let conflict_valid_child = test_transaction(&keychain, vec![6], vec![4]); + let conflict_valid_child = test_transaction(&keychain, vec![600], vec![400]); // 8. A transaction that descends from transaction 3 that should be // invalidated due to an output conflict. - let valid_child_conflict = test_transaction(&keychain, vec![13], vec![9]); + let valid_child_conflict = test_transaction(&keychain, vec![1_300], vec![900]); // 9. A transaction that descends from transaction 3 that should remain // valid after reconciliation. - let valid_child_valid = test_transaction(&keychain, vec![15], vec![11]); + let valid_child_valid = test_transaction(&keychain, vec![1_500], vec![1_100]); // 10. A transaction that descends from both transaction 6 and // transaction 9 - let mixed_child = test_transaction(&keychain, vec![2, 11], vec![7]); + let mixed_child = test_transaction(&keychain, vec![200, 1_100], vec![700]); let txs_to_add = vec![ block_transaction, @@ -122,13 +119,13 @@ fn test_transaction_pool_block_reconciliation() { // Now we prepare the block that will cause the above conditions to be met. 
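Before the block is prepared below, here is a toy model of the reconciliation rule this test exercises: once a block is accepted, a pool transaction survives only if all of its inputs are still spendable, and surviving transactions keep their descendants alive by re-adding their outputs. This is an illustrative sketch only (outputs are modeled as plain values rather than commitments), not the pool's actual algorithm:

    use std::collections::HashSet;

    // A pool tx modeled only by the values it spends and creates.
    struct PoolTx {
        inputs: Vec<u64>,
        outputs: Vec<u64>,
    }

    // Keep a tx only if all of its inputs are spendable; kept txs contribute
    // their outputs so that their children can remain valid too.
    fn reconcile(pool: &[PoolTx], mut spendable: HashSet<u64>) -> Vec<usize> {
        let mut kept = Vec::new();
        for (i, tx) in pool.iter().enumerate() {
            if tx.inputs.iter().all(|v| spendable.contains(v)) {
                for v in &tx.inputs {
                    spendable.remove(v);
                }
                spendable.extend(tx.outputs.iter().copied());
                kept.push(i);
            }
        }
        kept
    }

    fn main() {
        // After the block: 600 and 3_000 are spendable, 2_000 is gone.
        let pool = vec![
            PoolTx { inputs: vec![2_000], outputs: vec![1_200, 600] },   // conflict (tx 2)
            PoolTx { inputs: vec![1_200], outputs: vec![200] },          // orphaned child (tx 6)
            PoolTx { inputs: vec![600], outputs: vec![400] },            // satisfied by block (tx 7)
            PoolTx { inputs: vec![3_000], outputs: vec![1_300, 1_500] }, // still valid (tx 3)
        ];
        let spendable: HashSet<u64> = [600, 3_000].into_iter().collect();
        assert_eq!(reconcile(&pool, spendable), vec![2, 3]);
    }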
// First, the transactions we want in the block: // - Copy of 1 - let block_tx_1 = test_transaction(&keychain, vec![10], vec![8]); + let block_tx_1 = test_transaction(&keychain, vec![1_000], vec![800]); // - Conflict w/ 2, satisfies 7 - let block_tx_2 = test_transaction(&keychain, vec![20], vec![6]); + let block_tx_2 = test_transaction(&keychain, vec![2_000], vec![600]); // - Copy of 4 - let block_tx_3 = test_transaction(&keychain, vec![8], vec![5, 1]); + let block_tx_3 = test_transaction(&keychain, vec![800], vec![500, 100]); // - Output conflict w/ 8 - let block_tx_4 = test_transaction(&keychain, vec![40], vec![9, 31]); + let block_tx_4 = test_transaction(&keychain, vec![4_000], vec![900, 2_900]); let block_txs = &[block_tx_1, block_tx_2, block_tx_3, block_tx_4]; add_block(&chain, block_txs, &keychain); diff --git a/pool/tests/coinbase_maturity.rs b/pool/tests/coinbase_maturity.rs index dfc2b2db2f..076f8dacc1 100644 --- a/pool/tests/coinbase_maturity.rs +++ b/pool/tests/coinbase_maturity.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,11 +14,9 @@ pub mod common; -use self::core::core::verifier_cache::LruVerifierCache; use self::core::global; use self::keychain::{ExtKeychain, Keychain}; use self::pool::types::PoolError; -use self::util::RwLock; use crate::common::*; use grin_core as core; use grin_keychain as keychain; @@ -31,6 +29,7 @@ use std::sync::Arc; fn test_coinbase_maturity() { util::init_test_logger(); global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + global::set_local_accept_fee_base(50_000_000); let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); let db_root = "target/.coinbase_maturity"; @@ -38,15 +37,11 @@ fn test_coinbase_maturity() { let genesis = genesis_block(&keychain); let chain = Arc::new(init_chain(db_root, genesis)); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); // Initialize a new pool with our chain adapter. - let mut pool = init_transaction_pool( - Arc::new(ChainAdapter { - chain: chain.clone(), - }), - verifier_cache, - ); + let mut pool = init_transaction_pool(Arc::new(ChainAdapter { + chain: chain.clone(), + })); // Add a single block, introducing coinbase output to be spent later. add_block(&chain, &[], &keychain); diff --git a/pool/tests/common.rs b/pool/tests/common.rs index b26e996fc1..78dbb8bde3 100644 --- a/pool/tests/common.rs +++ b/pool/tests/common.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -18,7 +18,6 @@ use self::chain::types::{NoopAdapter, Options}; use self::chain::Chain; use self::core::consensus; use self::core::core::hash::Hash; -use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache}; use self::core::core::{ Block, BlockHeader, BlockSums, Inputs, KernelFeatures, OutputIdentifier, Transaction, TxKernel, }; @@ -29,13 +28,12 @@ use self::core::pow; use self::keychain::{BlindingFactor, ExtKeychain, ExtKeychainPath, Keychain}; use self::pool::types::*; use self::pool::TransactionPool; -use self::util::RwLock; use chrono::Duration; use grin_chain as chain; use grin_core as core; use grin_keychain as keychain; use grin_pool as pool; -use grin_util as util; +use std::convert::TryInto; use std::fs; use std::sync::Arc; @@ -52,13 +50,11 @@ where } pub fn init_chain(dir_name: &str, genesis: Block) -> Chain { - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); Chain::init( dir_name.to_string(), Arc::new(NoopAdapter {}), genesis, pow::verify_size, - verifier_cache, false, ) .unwrap() @@ -80,7 +76,7 @@ where let prev = chain.head_header().unwrap(); let height = prev.height + 1; let next_header_info = consensus::next_difficulty(height, chain.difficulty_iter().unwrap()); - let fee = txs.iter().map(|x| x.fee()).sum(); + let fee = txs.iter().map(|x| x.fee(height)).sum(); let key_id = ExtKeychainPath::new(1, height as u32, 0, 0, 0).to_identifier(); let reward = reward::output( keychain, @@ -137,9 +133,9 @@ impl BlockChain for ChainAdapter { } fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> { - self.chain.validate_tx(tx).map_err(|e| match e.kind() { - chain::ErrorKind::Transaction(txe) => txe.into(), - chain::ErrorKind::NRDRelativeHeight => PoolError::NRDKernelRelativeHeight, + self.chain.validate_tx(tx).map_err(|e| match e { + chain::Error::Transaction { source: txe } => txe.into(), + chain::Error::NRDRelativeHeight => PoolError::NRDKernelRelativeHeight, _ => PoolError::Other("failed to validate tx".into()), }) } @@ -163,30 +159,25 @@ impl BlockChain for ChainAdapter { .map_err(|_| PoolError::ImmatureTransaction) } fn replay_attack_check(&self, tx: &Transaction) -> Result<(), PoolError> { - self.chain - .replay_attack_check(tx) - .map_err(|_| PoolError::DuplicateKernelOrDuplicateSpent) + self.chain.replay_attack_check(tx).map_err(|e| { + PoolError::DuplicateKernelOrDuplicateSpent(format!("Replay attack detected, {}", e)) + }) } } -pub fn init_transaction_pool( - chain: Arc, - verifier_cache: Arc>, -) -> TransactionPool +pub fn init_transaction_pool(chain: Arc) -> TransactionPool where B: BlockChain, - V: VerifierCache + 'static, { TransactionPool::new( PoolConfig { - accept_fee_base: 0, + accept_fee_base: default_accept_fee_base(), reorg_cache_timeout: 1_440, max_pool_size: 50, max_stempool_size: 50, mineable_max_weight: 10_000, }, chain.clone(), - verifier_cache.clone(), Arc::new(NoopPoolAdapter {}), ) } @@ -220,7 +211,9 @@ where } build::transaction( - KernelFeatures::Plain { fee: fees as u64 }, + KernelFeatures::Plain { + fee: (fees as u64).try_into().unwrap(), + }, &tx_elements, keychain, &ProofBuilder::new(keychain), @@ -245,7 +238,9 @@ where keychain, input_values, output_values, - KernelFeatures::Plain { fee: fees as u64 }, + KernelFeatures::Plain { + fee: (fees as u64).try_into().unwrap(), + }, ) } diff --git a/pool/tests/nrd_kernel_relative_height.rs b/pool/tests/nrd_kernel_relative_height.rs index 33f156cbde..eae9b0807f 100644 --- a/pool/tests/nrd_kernel_relative_height.rs +++ 
b/pool/tests/nrd_kernel_relative_height.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,13 +16,11 @@ pub mod common; use self::core::consensus; use self::core::core::hash::Hashed; -use self::core::core::verifier_cache::LruVerifierCache; use self::core::core::{HeaderVersion, KernelFeatures, NRDRelativeHeight, TxKernel}; use self::core::global; use self::core::libtx::aggsig; use self::keychain::{BlindingFactor, ExtKeychain, Keychain}; use self::pool::types::PoolError; -use self::util::RwLock; use crate::common::*; use grin_core as core; use grin_keychain as keychain; @@ -34,6 +32,7 @@ use std::sync::Arc; fn test_nrd_kernel_relative_height() -> Result<(), PoolError> { util::init_test_logger(); global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + global::set_local_accept_fee_base(10); global::set_local_nrd_enabled(true); let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); @@ -43,15 +42,11 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> { let genesis = genesis_block(&keychain); let chain = Arc::new(init_chain(db_root, genesis)); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); // Initialize a new pool with our chain adapter. - let mut pool = init_transaction_pool( - Arc::new(ChainAdapter { - chain: chain.clone(), - }), - verifier_cache, - ); + let mut pool = init_transaction_pool(Arc::new(ChainAdapter { + chain: chain.clone(), + })); add_some_blocks(&chain, 3, &keychain); @@ -59,30 +54,33 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> { // Now create tx to spend an early coinbase (now matured). // Provides us with some useful outputs to test with. - let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]); + let initial_tx = + test_transaction_spending_coinbase(&keychain, &header_1, vec![1_000, 2_000, 3_000, 4_000]); // Mine that initial tx so we can spend it with multiple txs. add_block(&chain, &[initial_tx], &keychain); - add_some_blocks(&chain, 5, &keychain); + // mine past HF4 to see effect of set_local_accept_fee_base + add_some_blocks(&chain, 8, &keychain); let header = chain.head_header().unwrap(); - assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK); + // Note, in MWC NRD will be activated from Header 3. But 4 for the testing does work well too + assert_eq!(header.height, 4 * consensus::TESTING_HARD_FORK_INTERVAL); assert_eq!(header.version, HeaderVersion(4)); let (tx1, tx2, tx3) = { let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate { - fee: 6, + fee: 600.into(), relative_height: NRDRelativeHeight::new(2)?, }); let msg = kernel.msg_to_sign().unwrap(); // Generate a kernel with public excess and associated signature. 
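Before the kernel construction continues below, a dependency-free sketch of the NoRecentDuplicate rule these tests exercise: a kernel whose excess already appeared within the last relative_height blocks is rejected, while one at least relative_height blocks away is accepted. Illustrative only; the real check runs against the chain's kernel history, and the string keys here are stand-ins for kernel excess commitments:

    use std::collections::HashMap;

    // Height at which each excess was last included.
    struct NrdIndex {
        last_seen: HashMap<&'static str, u64>,
    }

    impl NrdIndex {
        fn accept(&mut self, excess: &'static str, height: u64, relative_height: u64) -> bool {
            match self.last_seen.get(excess) {
                // Duplicate too recent: reject.
                Some(&prev) if height - prev < relative_height => false,
                _ => {
                    self.last_seen.insert(excess, height);
                    true
                }
            }
        }
    }

    fn main() {
        let mut index = NrdIndex { last_seen: HashMap::new() };
        assert!(index.accept("excess_1", 10, 2));  // first appearance
        assert!(!index.accept("excess_1", 11, 2)); // only 1 block apart: rejected
        assert!(index.accept("excess_1", 12, 2));  // 2 blocks apart: accepted
    }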
- let excess = BlindingFactor::rand(); - let skey = excess.secret_key().unwrap(); + let excess = BlindingFactor::rand(keychain.secp()); + let skey = excess.secret_key(keychain.secp()).unwrap(); kernel.excess = keychain.secp().commit(0, skey).unwrap(); - let pubkey = &kernel.excess.to_pubkey().unwrap(); + let pubkey = &kernel.excess.to_pubkey(keychain.secp()).unwrap(); kernel.excess_sig = aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap(); kernel.verify().unwrap(); @@ -95,23 +93,23 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> { let tx1 = test_transaction_with_kernel( &keychain, - vec![10, 20], - vec![24], + vec![1_000, 2_000], + vec![2_400], kernel.clone(), excess.clone(), ); let tx2 = test_transaction_with_kernel( &keychain, - vec![24], - vec![18], + vec![2_400], + vec![1_800], kernel2.clone(), excess.clone(), ); // Now reuse kernel excess for tx3 but with NRD relative_height=1 (and different fee). let mut kernel_short = TxKernel::with_features(KernelFeatures::NoRecentDuplicate { - fee: 3, + fee: 300.into(), relative_height: NRDRelativeHeight::new(1)?, }); let msg_short = kernel_short.msg_to_sign().unwrap(); @@ -123,8 +121,8 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> { let tx3 = test_transaction_with_kernel( &keychain, - vec![18], - vec![15], + vec![1_800], + vec![1_500], kernel_short.clone(), excess.clone(), ); diff --git a/pool/tests/nrd_kernels_disabled.rs b/pool/tests/nrd_kernels_disabled.rs index 90d6ef0b5a..0e228b1a76 100644 --- a/pool/tests/nrd_kernels_disabled.rs +++ b/pool/tests/nrd_kernels_disabled.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,12 +15,10 @@ pub mod common; use self::core::consensus; -use self::core::core::verifier_cache::LruVerifierCache; use self::core::core::{HeaderVersion, KernelFeatures, NRDRelativeHeight}; use self::core::global; use self::keychain::{ExtKeychain, Keychain}; use self::pool::types::PoolError; -use self::util::RwLock; use crate::common::*; use grin_core as core; use grin_keychain as keychain; @@ -41,30 +39,27 @@ fn test_nrd_kernels_disabled() { let genesis = genesis_block(&keychain); let chain = Arc::new(init_chain(db_root, genesis)); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); // Initialize a new pool with our chain adapter. - let mut pool = init_transaction_pool( - Arc::new(ChainAdapter { - chain: chain.clone(), - }), - verifier_cache, - ); + let mut pool = init_transaction_pool(Arc::new(ChainAdapter { + chain: chain.clone(), + })); // Add some blocks. add_some_blocks(&chain, 3, &keychain); // Spend the initial coinbase. 
let header_1 = chain.get_header_by_height(1).unwrap(); - let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]); + let tx = + test_transaction_spending_coinbase(&keychain, &header_1, vec![1_000, 2_000, 3_000, 4_000]); add_block(&chain, &[tx], &keychain); let tx_1 = test_transaction_with_kernel_features( &keychain, - vec![10, 20], - vec![24], + vec![1_000, 2_000], + vec![2_400], KernelFeatures::NoRecentDuplicate { - fee: 6, + fee: 600.into(), relative_height: NRDRelativeHeight::new(1440).unwrap(), }, ); diff --git a/pool/tests/nrd_kernels_enabled.rs b/pool/tests/nrd_kernels_enabled.rs index 7fd77987ae..c2c9bf011a 100644 --- a/pool/tests/nrd_kernels_enabled.rs +++ b/pool/tests/nrd_kernels_enabled.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,12 +15,10 @@ pub mod common; use self::core::consensus; -use self::core::core::verifier_cache::LruVerifierCache; use self::core::core::{HeaderVersion, KernelFeatures, NRDRelativeHeight}; use self::core::global; use self::keychain::{ExtKeychain, Keychain}; use self::pool::types::PoolError; -use self::util::RwLock; use crate::common::*; use grin_core as core; use grin_keychain as keychain; @@ -32,6 +30,7 @@ use std::sync::Arc; fn test_nrd_kernels_enabled() { util::init_test_logger(); global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + global::set_local_accept_fee_base(10); global::set_local_nrd_enabled(true); let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); @@ -41,36 +40,37 @@ fn test_nrd_kernels_enabled() { let genesis = genesis_block(&keychain); let chain = Arc::new(init_chain(db_root, genesis)); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); // Initialize a new pool with our chain adapter. - let mut pool = init_transaction_pool( - Arc::new(ChainAdapter { - chain: chain.clone(), - }), - verifier_cache, - ); + let mut pool = init_transaction_pool(Arc::new(ChainAdapter { + chain: chain.clone(), + })); // Add some blocks. add_some_blocks(&chain, 3, &keychain); // Spend the initial coinbase. let header_1 = chain.get_header_by_height(1).unwrap(); - let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]); + let mg = consensus::MILLI_GRIN; + let tx = test_transaction_spending_coinbase( + &keychain, + &header_1, + vec![100 * mg, 200 * mg, 300 * mg, 400 * mg], + ); add_block(&chain, &[tx], &keychain); let tx_1 = test_transaction_with_kernel_features( &keychain, - vec![10, 20], - vec![24], + vec![100 * mg, 200 * mg], + vec![240 * mg], KernelFeatures::NoRecentDuplicate { - fee: 6, + fee: (60 * mg as u32).into(), relative_height: NRDRelativeHeight::new(1440).unwrap(), }, ); let header = chain.head_header().unwrap(); - assert!(header.version < HeaderVersion(4)); + assert!(header.version < HeaderVersion(3)); // in MWC activating NRD from V3 assert_eq!( pool.add_to_pool(test_source(), tx_1.clone(), false, &header), @@ -83,7 +83,7 @@ fn test_nrd_kernels_enabled() { assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK); assert_eq!(header.version, HeaderVersion(4)); - // NRD kernel support not enabled via feature flag, so not valid. + // NRD kernel support enabled via feature flag, so valid. 
assert_eq!( pool.add_to_pool(test_source(), tx_1.clone(), false, &header), Ok(()) diff --git a/pool/tests/transaction_pool.rs b/pool/tests/transaction_pool.rs index 5c4b5016bf..7595d7825e 100644 --- a/pool/tests/transaction_pool.rs +++ b/pool/tests/transaction_pool.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,12 +14,10 @@ pub mod common; -use self::core::core::verifier_cache::LruVerifierCache; use self::core::core::{transaction, Weighting}; use self::core::global; use self::keychain::{ExtKeychain, Keychain}; use self::pool::TxSource; -use self::util::RwLock; use crate::common::*; use grin_core as core; use grin_keychain as keychain; @@ -32,6 +30,7 @@ use std::sync::Arc; fn test_the_transaction_pool() { util::init_test_logger(); global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + global::set_local_accept_fee_base(1); let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); let db_root = "target/.transaction_pool"; @@ -39,17 +38,14 @@ fn test_the_transaction_pool() { let genesis = genesis_block(&keychain); let chain = Arc::new(init_chain(db_root, genesis)); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); // Initialize a new pool with our chain adapter. - let mut pool = init_transaction_pool( - Arc::new(ChainAdapter { - chain: chain.clone(), - }), - verifier_cache.clone(), - ); + let mut pool = init_transaction_pool(Arc::new(ChainAdapter { + chain: chain.clone(), + })); - add_some_blocks(&chain, 3, &keychain); + // mine past HF4 to see effect of set_local_accept_fee_base + add_some_blocks(&chain, 4 * 3, &keychain); let header = chain.head_header().unwrap(); let header_1 = chain.get_header_by_height(1).unwrap(); @@ -74,9 +70,9 @@ fn test_the_transaction_pool() { } // tx1 spends some outputs from the initial test tx. - let tx1 = test_transaction(&keychain, vec![500, 600], vec![499, 599]); + let tx1 = test_transaction(&keychain, vec![500, 600], vec![469, 569]); // tx2 spends some outputs from both tx1 and the initial test tx. - let tx2 = test_transaction(&keychain, vec![499, 700], vec![498]); + let tx2 = test_transaction(&keychain, vec![469, 700], vec![498]); { // Check we have a single initial tx in the pool. @@ -105,7 +101,7 @@ fn test_the_transaction_pool() { // Test adding a duplicate tx with the same input and outputs. // Note: not the *same* tx, just same underlying inputs/outputs. { - let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]); + let tx1a = test_transaction(&keychain, vec![500, 600], vec![469, 569]); assert!(pool .add_to_pool(test_source(), tx1a, false, &header) .is_err()); @@ -113,7 +109,7 @@ fn test_the_transaction_pool() { // Test adding a tx attempting to spend a non-existent output. { - let bad_tx = test_transaction(&keychain, vec![10_001], vec![10_000]); + let bad_tx = test_transaction(&keychain, vec![10_001], vec![9_900]); assert!(pool .add_to_pool(test_source(), bad_tx, false, &header) .is_err()); @@ -130,7 +126,7 @@ fn test_the_transaction_pool() { // Confirm the tx pool correctly identifies an invalid tx (already spent). 
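Before the already-spent check that follows, note what the tx1a case above relies on: the pool refuses a transaction that would re-create an output commitment it already tracks, even though tx1a is not the same transaction as tx1. A toy sketch of that duplicate-output guard (plain values stand in for commitments, and the error string is a stand-in for the pool's duplicate-commitment rejection):

    use std::collections::HashSet;

    // Pool that tracks the output commitments of everything it holds.
    struct ToyPool {
        outputs: HashSet<u64>,
    }

    impl ToyPool {
        fn add(&mut self, tx_outputs: &[u64]) -> Result<(), &'static str> {
            if tx_outputs.iter().any(|o| self.outputs.contains(o)) {
                return Err("DuplicateCommitment");
            }
            self.outputs.extend(tx_outputs.iter().copied());
            Ok(())
        }
    }

    fn main() {
        let mut pool = ToyPool { outputs: HashSet::new() };
        assert!(pool.add(&[469, 569]).is_ok());  // tx1 accepted
        assert!(pool.add(&[469, 569]).is_err()); // tx1a: same outputs rejected
    }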
{ - let tx3 = test_transaction(&keychain, vec![500], vec![497]); + let tx3 = test_transaction(&keychain, vec![500], vec![467]); assert!(pool .add_to_pool(test_source(), tx3, false, &header) .is_err()); @@ -139,9 +135,9 @@ fn test_the_transaction_pool() { // Now add a couple of txs to the stempool (stem = true). { - let tx = test_transaction(&keychain, vec![599], vec![598]); + let tx = test_transaction(&keychain, vec![569], vec![538]); pool.add_to_pool(test_source(), tx, true, &header).unwrap(); - let tx2 = test_transaction(&keychain, vec![598], vec![597]); + let tx2 = test_transaction(&keychain, vec![538], vec![507]); pool.add_to_pool(test_source(), tx2, true, &header).unwrap(); assert_eq!(pool.total_size(), 3); assert_eq!(pool.stempool.size(), 2); @@ -165,7 +161,7 @@ fn test_the_transaction_pool() { // Adding a duplicate tx to the stempool will result in it being fluffed. // This handles the case of the stem path having a cycle in it. { - let tx = test_transaction(&keychain, vec![597], vec![596]); + let tx = test_transaction(&keychain, vec![507], vec![476]); pool.add_to_pool(test_source(), tx.clone(), true, &header) .unwrap(); assert_eq!(pool.total_size(), 4); @@ -185,15 +181,14 @@ fn test_the_transaction_pool() { // We will do this be adding a new tx to the pool // that is a superset of a tx already in the pool. { - let tx4 = test_transaction(&keychain, vec![800], vec![799]); + let tx4 = test_transaction(&keychain, vec![800], vec![769]); // tx1 and tx2 are already in the txpool (in aggregated form) // tx4 is the "new" part of this aggregated tx that we care about let agg_tx = transaction::aggregate(&[tx1.clone(), tx2.clone(), tx4]).unwrap(); - agg_tx - .validate(Weighting::AsTransaction, verifier_cache.clone()) - .unwrap(); + let height = 12 + 1; + agg_tx.validate(Weighting::AsTransaction, height).unwrap(); pool.add_to_pool(test_source(), agg_tx, false, &header) .unwrap(); diff --git a/servers/Cargo.toml b/servers/Cargo.toml index 03312b627e..078d5baf82 100644 --- a/servers/Cargo.toml +++ b/servers/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grin_servers" -version = "4.4.2" +version = "5.3.2" authors = ["Grin Developers "] description = "Simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format." license = "Apache-2.0" @@ -24,22 +24,22 @@ chrono = "0.4.11" tokio = {version = "0.2", features = ["full"] } tokio-util = { version = "0.2", features = ["codec"] } walkdir = "2.3.1" -failure = "0.1" -failure_derive = "0.1" +thiserror = "1" ed25519-dalek = "1" regex = "1.3" sysinfo = "0.9" dirs = "1.0.3" timer = "0.2" +atomic_float = "1.0" -grin_api = { path = "../api", version = "4.4.2" } -grin_chain = { path = "../chain", version = "4.4.2" } -grin_core = { path = "../core", version = "4.4.2" } -grin_keychain = { path = "../keychain", version = "4.4.2" } -grin_p2p = { path = "../p2p", version = "4.4.2" } -grin_pool = { path = "../pool", version = "4.4.2" } -grin_store = { path = "../store", version = "4.4.2" } -grin_util = { path = "../util", version = "4.4.2" } +grin_api = { path = "../api", version = "5.3.2" } +grin_chain = { path = "../chain", version = "5.3.2" } +grin_core = { path = "../core", version = "5.3.2" } +grin_keychain = { path = "../keychain", version = "5.3.2" } +grin_p2p = { path = "../p2p", version = "5.3.2" } +grin_pool = { path = "../pool", version = "5.3.2" } +grin_store = { path = "../store", version = "5.3.2" } +grin_util = { path = "../util", version = "5.3.2" } # NOTE. 
We can't use the same hyper-rustls version for Android and non-Android builds because of how Rust resolves dependencies. # Android must have v0.20+ diff --git a/servers/src/common.rs b/servers/src/common.rs index 67cc12b409..0a1569c3be 100644 --- a/servers/src/common.rs +++ b/servers/src/common.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/servers/src/common/adapters.rs b/servers/src/common/adapters.rs index 5ece406800..3f5ebdbedf 100644 --- a/servers/src/common/adapters.rs +++ b/servers/src/common/adapters.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,33 +22,42 @@ use std::sync::{Arc, Weak}; use std::thread; use std::time::Instant; +use crate::chain::txhashset::BitmapChunk; use crate::chain::{ self, BlockStatus, ChainAdapter, Options, SyncState, SyncStatus, TxHashsetDownloadStats, }; -use std::collections::HashMap; -use std::sync::Mutex; use crate::common::hooks::{ChainEvents, NetEvents}; use crate::common::types::{ChainValidationMode, DandelionEpoch, ServerConfig}; use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::transaction::Transaction; -use crate::core::core::verifier_cache::VerifierCache; -use crate::core::core::{BlockHeader, BlockSums, CompactBlock, Inputs, OutputIdentifier}; +use crate::core::core::{ + BlockHeader, BlockSums, CompactBlock, Inputs, OutputIdentifier, Segment, SegmentIdentifier, + SegmentType, SegmentTypeIdentifier, TxKernel, +}; use crate::core::pow::Difficulty; use crate::core::ser::ProtocolVersion; use crate::core::{core, global}; use crate::p2p; use crate::p2p::types::PeerInfo; use crate::pool::{self, BlockChain, PoolAdapter}; +use crate::util::secp::pedersen::RangeProof; use crate::util::OneTime; use chrono::prelude::*; use chrono::Duration; +use grin_chain::txhashset::Segmenter; use rand::prelude::*; +use std::ops::Range; use std::sync::atomic::AtomicI64; use std::sync::atomic::Ordering; +const KERNEL_SEGMENT_HEIGHT_RANGE: Range<u8> = 9..14; +const BITMAP_SEGMENT_HEIGHT_RANGE: Range<u8> = 9..14; +const OUTPUT_SEGMENT_HEIGHT_RANGE: Range<u8> = 11..16; +const RANGEPROOF_SEGMENT_HEIGHT_RANGE: Range<u8> = 7..12; + // NetToChainAdapter needs a memory cache to prevent data overloading for network core nodes (non leaf nodes) -// This cache will drop sequense of the events during the second +// This cache will drop sequence of the events during the second struct EventCache { event: RwLock<Hash>, time: AtomicI64, @@ -89,16 +98,14 @@ impl EventCache { /// Implementation of the NetAdapter for the blockchain. Gets notified when new /// blocks and transactions are received and forwards to the chain and pool /// implementations.
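The EventCache described above suppresses repeat processing: an event is dropped when the same hash was seen within the last second. A minimal std-only sketch of the idea (the real struct holds a Hash behind an RwLock plus an AtomicI64 timestamp; a u64 stands in for the hash here):

    use std::time::{Duration, Instant};

    // Drop a repeated event if the same one was seen within the window.
    struct EventCache {
        last: Option<(u64, Instant)>, // (event hash stand-in, time first seen)
        window: Duration,
    }

    impl EventCache {
        // Returns true if the event is fresh and should be processed.
        fn check(&mut self, event: u64) -> bool {
            let now = Instant::now();
            match self.last {
                Some((e, t)) if e == event && now.duration_since(t) < self.window => false,
                _ => {
                    self.last = Some((event, now));
                    true
                }
            }
        }
    }

    fn main() {
        let mut cache = EventCache { last: None, window: Duration::from_secs(1) };
        assert!(cache.check(42));  // first sighting: process
        assert!(!cache.check(42)); // duplicate within a second: drop
        assert!(cache.check(7));   // different event: process
    }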
-pub struct NetToChainAdapter +pub struct NetToChainAdapter where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { sync_state: Arc, chain: Weak, - tx_pool: Arc>>, - verifier_cache: Arc>, + tx_pool: Arc>>, peers: OneTime>, config: ServerConfig, hooks: Vec>, @@ -107,17 +114,12 @@ where processed_headers: EventCache, processed_blocks: EventCache, processed_transactions: EventCache, - - header_cache: Arc>>, - tip_processed: Arc>, - reset_tip: Arc>, } -impl p2p::ChainAdapter for NetToChainAdapter +impl p2p::ChainAdapter for NetToChainAdapter where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { fn total_difficulty(&self) -> Result { Ok(self.chain().head()?.total_difficulty) @@ -154,11 +156,11 @@ where tx: core::Transaction, stem: bool, ) -> Result { + // nothing much we can do with a new transaction while syncing if self.sync_state.is_syncing() { return Ok(true); } - // nothing much we can do with a new transaction while syncing let tx_hash = tx.hash(); // For transaction we allow double processing, we want to be sure that TX will be stored in the pool // because there is no recovery plan for transactions. So we want to use natural retry to help us handle failures @@ -204,7 +206,7 @@ where debug!("block_received, cache for {} OK", b_hash); } - if self.chain().block_exists(b.hash())? { + if self.chain().is_known(&b.header).is_err() { return Ok(true); } @@ -217,7 +219,6 @@ where b.outputs().len(), b.kernels().len(), ); - self.process_block(b, peer_info, opts) } @@ -227,9 +228,10 @@ where peer_info: &PeerInfo, ) -> Result { // No need to process this compact block if we have previously accepted the _full block_. - if self.chain().block_exists(cb.hash())? { + if self.chain().is_known(&cb.header).is_err() { return Ok(true); } + let bhash = cb.hash(); debug!( "Received compact_block {} at {} from {} [out/kern/kern_ids: {}/{}/{}] going to process.", @@ -270,7 +272,7 @@ where .chain() .process_block_header(&cb.header, chain::Options::NONE) { - debug!("Invalid compact block header {}: {:?}", cb_hash, e.kind()); + debug!("Invalid compact block header {}: {:?}", cb_hash, e); return Ok(!e.is_bad_data()); } @@ -308,10 +310,7 @@ where }; if let Ok(prev) = self.chain().get_previous_header(&cb.header) { - if block - .validate(&prev.total_kernel_offset, self.verifier_cache.clone()) - .is_ok() - { + if block.validate(&prev.total_kernel_offset).is_ok() { debug!( "successfully hydrated block: {} at {} ({})", block.header.hash(), @@ -373,11 +372,7 @@ where let res = self.chain().process_block_header(&bh, chain::Options::NONE); if let Err(e) = res { - debug!( - "Block header {} refused by chain: {:?}", - bh.hash(), - e.kind() - ); + debug!("Block header {} refused by chain: {:?}", bh.hash(), e); if e.is_bad_data() { return Ok(false); } else { @@ -399,26 +394,12 @@ where &self, bhs: &[core::BlockHeader], peer_info: &PeerInfo, - header_cache_size: u64, ) -> Result { - let tip_processed = { - let mut tip_processed = self.tip_processed.lock().unwrap(); - let sync_head_height = self.chain().get_sync_head()?.height; - - let mut reset_tip = self.reset_tip.lock().unwrap(); - if *reset_tip != 0 { - warn!( - "reset of tip to {} from {} due to differing headers.", - *reset_tip, *tip_processed - ); - *tip_processed = *reset_tip; - *reset_tip = 0; - } else if *tip_processed < sync_head_height { - *tip_processed = sync_head_height; - } - - *tip_processed - }; + info!( + "Received {} block headers from {}", + bhs.len(), + peer_info.addr + ); if bhs.is_empty() { return Ok(false); @@ -429,118 +410,26 @@ 
where return Ok(false); } - info!( - "Received {} block headers from {}, height {}, hash = {}, tip_processed = {}", - bhs.len(), - peer_info.addr, - bhs[0].height, - bhs[0].hash(), - tip_processed, - ); - - if bhs[0].height > tip_processed + 1 { - // we can't process this yet. - // try to process anything in the cache that we can - - if header_cache_size > 0 { - for bh in bhs { - let mut hashmap = self.header_cache.lock().unwrap(); - hashmap.insert(bh.height, bh.clone()); - if bh.height > header_cache_size { - hashmap.remove(&(bh.height - header_cache_size)); - } - } - } - return Ok(true); - } - if header_cache_size > 0 { - let mut itt = tip_processed + 1; - let mut bh_backlog: Vec = Vec::new(); - let mut backlog_processed = false; - loop { - { - let hashmap = self.header_cache.lock().unwrap(); - let next = hashmap.get(&itt); - if !next.is_some() { - break; - } - let next = next.unwrap(); - //info!("adding headers to the backlog: {}", next.height); - bh_backlog.push(next.clone()); - } - - if bh_backlog.len() >= 256 { - // getting too big, process and continue - self.process_add_headers_sync(&bh_backlog.as_slice(), header_cache_size)?; - bh_backlog = Vec::new(); - backlog_processed = true; - } - - itt = itt + 1; - } - - if bh_backlog.len() > 0 { - self.process_add_headers_sync(&bh_backlog.as_slice(), header_cache_size)?; - return Ok(true); - } - if backlog_processed { + // Read our sync_head if we are in header_sync. + // If not then we can ignore this batch of headers. + let sync_head = match self.sync_state.status() { + SyncStatus::HeaderSync { sync_head, .. } => sync_head, + _ => { + debug!("headers_received: ignoring as not in header_sync"); return Ok(true); } - } - - let first_height = bhs[0].height; - for bh in bhs { - if header_cache_size > 0 { - // set highest processed block - let mut hashmap = self.header_cache.lock().unwrap(); - let value = hashmap.get(&bh.height); - if value.is_some() { - // we already have something here. - // does it match? If so return. - let cache_value = value.unwrap(); - if bh.prev_hash == cache_value.prev_hash { - if first_height <= tip_processed { - return Ok(true); - } - } else { - // it doesn't match! there must have - // been a reorg or someone gave us bad headers. - // clear the entire hashmap to be safe. - // go back to previous logic at this point hashmap.clear(); - warn!( - "different header value at height = {}. clearing cache.", - bh.height - ); - hashmap.clear(); - *(self.reset_tip.lock().unwrap()) = first_height - 1; - break; - } - } - } - } - self.process_add_headers_sync(bhs, header_cache_size) - } + }; - fn process_add_headers_sync( - &self, - bhs: &[core::BlockHeader], - header_cache_size: u64, - ) -> Result { - let mut hashmap = self.header_cache.lock().unwrap(); // try to add headers to our header chain - match self.chain().sync_block_headers(bhs, chain::Options::SYNC) { - Ok(_) => { - for bh in bhs { - let mut tip_processed = self.tip_processed.lock().unwrap(); - if *tip_processed < bh.height { - *tip_processed = bh.height; - } - if header_cache_size > 0 { - hashmap.insert(bh.height, bh.clone()); - if bh.height > header_cache_size { - hashmap.remove(&(bh.height - header_cache_size)); - } - } + match self + .chain() + .sync_block_headers(bhs, sync_head, chain::Options::SYNC) + { + Ok(sync_head) => { + // If we have an updated sync_head after processing this batch of headers + // then update our sync_state so we can request relevant headers in the next batch. 
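The comment above describes a feedback loop: process a batch of headers against the current sync_head, and when the batch advances the header chain, seed the next request from the updated head (the code for this continues just below). A toy model under that reading, with plain u64 heights standing in for (hash, height) sync heads:

    // Current header-sync position.
    struct SyncState {
        sync_head: u64,
    }

    impl SyncState {
        fn update_header_sync(&mut self, new_head: u64) {
            self.sync_head = new_head;
        }
    }

    // Process a batch; returns the new head only if the batch extended the chain.
    fn sync_block_headers(batch: &[u64], sync_head: u64) -> Option<u64> {
        batch.iter().copied().filter(|h| *h > sync_head).max()
    }

    fn main() {
        let mut state = SyncState { sync_head: 100 };
        for batch in [vec![101, 102], vec![90, 95], vec![103]] {
            if let Some(new_head) = sync_block_headers(&batch, state.sync_head) {
                state.update_header_sync(new_head);
            }
        }
        // The stale second batch is ignored; the others advance the head.
        assert_eq!(state.sync_head, 103);
    }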
+ if let Some(sync_head) = sync_head { + self.sync_state.update_header_sync(sync_head); } Ok(true) } @@ -677,9 +566,9 @@ where if is_bad_data { self.chain().clean_txhashset_sandbox(); error!("Failed to save txhashset archive: bad data"); - self.sync_state.set_sync_error( - chain::ErrorKind::TxHashSetErr("bad txhashset data".to_string()).into(), - ); + self.sync_state.set_sync_error(chain::Error::TxHashSetErr( + "bad txhashset data".to_string(), + )); } else { info!("Received valid txhashset data for {}.", h); } @@ -701,20 +590,272 @@ where fn get_tmpfile_pathname(&self, tmpfile_name: String) -> PathBuf { self.chain().get_tmpfile_pathname(tmpfile_name) } + + fn prepare_segmenter(&self) -> Result { + self.chain().segmenter() + } + + fn get_kernel_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error> { + if !KERNEL_SEGMENT_HEIGHT_RANGE.contains(&id.height) { + return Err(chain::Error::InvalidSegmentHeight); + } + let segmenter = self.chain().segmenter()?; + let head_hash = segmenter.header().hash(); + if head_hash != hash { + return Err(chain::Error::SegmenterHeaderMismatch( + head_hash, + segmenter.header().height, + )); + } + segmenter.kernel_segment(id) + } + + fn get_bitmap_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error> { + if !BITMAP_SEGMENT_HEIGHT_RANGE.contains(&id.height) { + return Err(chain::Error::InvalidSegmentHeight); + } + let segmenter = self.chain().segmenter()?; + let head_hash = segmenter.header().hash(); + if head_hash != hash { + return Err(chain::Error::SegmenterHeaderMismatch( + head_hash, + segmenter.header().height, + )); + } + segmenter.bitmap_segment(id) + } + + fn get_output_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error> { + if !OUTPUT_SEGMENT_HEIGHT_RANGE.contains(&id.height) { + return Err(chain::Error::InvalidSegmentHeight); + } + let segmenter = self.chain().segmenter()?; + let head_hash = segmenter.header().hash(); + if head_hash != hash { + return Err(chain::Error::SegmenterHeaderMismatch( + head_hash, + segmenter.header().height, + )); + } + segmenter.output_segment(id) + } + + fn get_rangeproof_segment( + &self, + hash: Hash, + id: SegmentIdentifier, + ) -> Result, chain::Error> { + if !RANGEPROOF_SEGMENT_HEIGHT_RANGE.contains(&id.height) { + return Err(chain::Error::InvalidSegmentHeight); + } + let segmenter = self.chain().segmenter()?; + let head_hash = segmenter.header().hash(); + if head_hash != hash { + return Err(chain::Error::SegmenterHeaderMismatch( + head_hash, + segmenter.header().height, + )); + } + segmenter.rangeproof_segment(id) + } + + fn receive_bitmap_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result { + info!( + "Received bitmap segment {} for block_hash: {}, bitmap_root_hash: {}", + segment.identifier().idx, + block_hash, + bitmap_root_hash + ); + // TODO: Entire process needs to be restarted if the horizon block + // has changed (perhaps not here, NB this has to go somewhere) + let archive_header = self.chain().txhashset_archive_header_header_only()?; + if archive_header.hash() != block_hash { + return Ok(false); + } + let identifier = segment.identifier().clone(); + let mut retval = Ok(true); + if let Some(d) = self + .chain() + .get_desegmenter(&archive_header) + .write() + .as_mut() + { + let res = d.add_bitmap_segment(segment, bitmap_root_hash); + if let Err(e) = res { + error!( + "Validation of incoming bitmap segment failed: {:?}, reason: {}", + identifier, e + ); + retval = 
Err(e); + } + } else { + retval = Ok(false); + } + // Remove segment from outgoing list + self.sync_state.remove_pibd_segment(&SegmentTypeIdentifier { + segment_type: SegmentType::Bitmap, + identifier, + }); + retval + } + + fn receive_output_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result { + info!( + "Received output segment {} for block_hash: {}, bitmap_root_hash: {}", + segment.identifier().idx, + block_hash, + bitmap_root_hash + ); + let archive_header = self.chain().txhashset_archive_header_header_only()?; + if archive_header.hash() != block_hash { + return Ok(false); + } + let identifier = segment.identifier().clone(); + let mut retval = Ok(true); + if let Some(d) = self + .chain() + .get_desegmenter(&archive_header) + .write() + .as_mut() + { + let res = d.add_output_segment(segment, bitmap_root_hash); + if let Err(e) = res { + error!( + "Validation of incoming output segment failed: {:?}, reason: {}", + identifier, e + ); + retval = Err(e); + } + } else { + retval = Ok(false); + } + // Remove segment from outgoing list + self.sync_state.remove_pibd_segment(&SegmentTypeIdentifier { + segment_type: SegmentType::Output, + identifier, + }); + retval + } + + fn receive_rangeproof_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result { + info!( + "Received proof segment {} for block_hash: {}, bitmap_root_hash: {}", + segment.identifier().idx, + block_hash, + bitmap_root_hash + ); + let archive_header = self.chain().txhashset_archive_header_header_only()?; + if archive_header.hash() != block_hash { + return Ok(false); + } + let identifier = segment.identifier().clone(); + let mut retval = Ok(true); + if let Some(d) = self + .chain() + .get_desegmenter(&archive_header) + .write() + .as_mut() + { + let res = d.add_rangeproof_segment(segment, bitmap_root_hash); + if let Err(e) = res { + error!( + "Validation of incoming rangeproof segment failed: {:?}, reason: {}", + identifier, e + ); + retval = Err(e); + } + } else { + retval = Ok(false); + } + // Remove segment from outgoing list + self.sync_state.remove_pibd_segment(&SegmentTypeIdentifier { + segment_type: SegmentType::RangeProof, + identifier, + }); + retval + } + + fn receive_kernel_segment( + &self, + block_hash: Hash, + bitmap_root_hash: Hash, + segment: Segment, + ) -> Result { + info!( + "Received kernel segment {} for block_hash: {}, bitmap_root_hash: {}", + segment.identifier().idx, + block_hash, + bitmap_root_hash + ); + let archive_header = self.chain().txhashset_archive_header_header_only()?; + if archive_header.hash() != block_hash { + return Ok(false); + } + let identifier = segment.identifier().clone(); + let mut retval = Ok(true); + if let Some(d) = self + .chain() + .get_desegmenter(&archive_header) + .write() + .as_mut() + { + let res = d.add_kernel_segment(segment, bitmap_root_hash); + if let Err(e) = res { + error!( + "Validation of incoming rangeproof segment failed: {:?}, reason: {}", + identifier, e + ); + retval = Err(e); + } + } else { + retval = Ok(false); + } + // Remove segment from outgoing list + self.sync_state.remove_pibd_segment(&SegmentTypeIdentifier { + segment_type: SegmentType::Kernel, + identifier, + }); + retval + } } -impl NetToChainAdapter +impl NetToChainAdapter where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { /// Construct a new NetToChainAdapter instance pub fn new( sync_state: Arc, chain: Arc, - tx_pool: Arc>>, - verifier_cache: Arc>, + tx_pool: Arc>>, config: ServerConfig, hooks: 
Vec>, ) -> Self { @@ -722,16 +863,12 @@ where sync_state, chain: Arc::downgrade(&chain), tx_pool, - verifier_cache, peers: OneTime::new(), config, hooks, processed_headers: EventCache::new(), processed_blocks: EventCache::new(), processed_transactions: EventCache::new(), - header_cache: Arc::new(Mutex::new(HashMap::new())), - tip_processed: Arc::new(Mutex::new(0)), - reset_tip: Arc::new(Mutex::new(0)), } } @@ -806,8 +943,8 @@ where Ok(false) } Err(e) => { - match e.kind() { - chain::ErrorKind::Orphan(orph_msg) => { + match e { + chain::Error::Orphan(orph_msg) => { if let Ok(previous) = previous { // make sure we did not miss the parent block if !self.chain().is_orphan(&previous.hash()) @@ -820,11 +957,7 @@ where Ok(true) } _ => { - debug!( - "process_block: block {} refused by chain: {}", - bhash, - e.kind() - ); + debug!("process_block: block {} refused by chain: {}", bhash, e); Ok(true) } } @@ -861,11 +994,6 @@ where } fn check_compact(&self) { - // Skip compaction if we are syncing. - if self.sync_state.is_syncing() { - return; - } - // Roll the dice to trigger compaction at 1/COMPACTION_CHECK chance per block, // uses a different thread to avoid blocking the caller thread (likely a peer) let mut rng = thread_rng(); @@ -949,29 +1077,29 @@ where /// Implementation of the ChainAdapter for the network. Gets notified when the /// accepted a new block, asking the pool to update its state and /// the network to broadcast the block -pub struct ChainToPoolAndNetAdapter +pub struct ChainToPoolAndNetAdapter where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { - tx_pool: Arc>>, + tx_pool: Arc>>, peers: OneTime>, hooks: Vec>, } -impl ChainAdapter for ChainToPoolAndNetAdapter +impl ChainAdapter for ChainToPoolAndNetAdapter where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { fn block_accepted(&self, b: &core::Block, status: BlockStatus, opts: Options) { - // not broadcasting blocks received through sync + // Trigger all registered "on_block_accepted" hooks (logging and webhooks). + for hook in &self.hooks { + hook.on_block_accepted(b, status); + } + + // Suppress broadcast of new blocks received during sync. if !opts.contains(chain::Options::SYNC) { - for hook in &self.hooks { - hook.on_block_accepted(b, status); - } // If we mined the block then we want to broadcast the compact block. // If we received the block from another node then broadcast "header first" // to minimize network traffic. @@ -1005,15 +1133,14 @@ where } } -impl ChainToPoolAndNetAdapter +impl ChainToPoolAndNetAdapter where B: BlockChain, P: PoolAdapter, - V: VerifierCache + 'static, { /// Construct a ChainToPoolAndNetAdapter instance. 
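(The ChainToPoolAndNetAdapter constructor continues just below.) Each get_*_segment handler earlier in this file follows one guard pattern: reject a segment height outside the allowed range, then reject a request pinned to a different header than the one the segmenter currently serves. A condensed sketch of that shared shape; the error names echo the chain::Error variants used above, but the types here are stand-ins:

    use std::ops::Range;

    #[derive(Debug, PartialEq)]
    enum SegError {
        InvalidSegmentHeight,
        HeaderMismatch,
    }

    // Shared guard applied before serving any segment request.
    fn check_segment_request(
        allowed: &Range<u8>,
        requested_height: u8,
        requested_hash: u64, // stand-in for the block hash
        segmenter_hash: u64,
    ) -> Result<(), SegError> {
        if !allowed.contains(&requested_height) {
            return Err(SegError::InvalidSegmentHeight);
        }
        if requested_hash != segmenter_hash {
            return Err(SegError::HeaderMismatch);
        }
        Ok(())
    }

    fn main() {
        let kernel_range: Range<u8> = 9..14; // mirrors KERNEL_SEGMENT_HEIGHT_RANGE
        assert_eq!(check_segment_request(&kernel_range, 10, 7, 7), Ok(()));
        assert_eq!(
            check_segment_request(&kernel_range, 14, 7, 7),
            Err(SegError::InvalidSegmentHeight)
        );
        assert_eq!(
            check_segment_request(&kernel_range, 10, 7, 8),
            Err(SegError::HeaderMismatch)
        );
    }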
pub fn new( - tx_pool: Arc>>, + tx_pool: Arc>>, hooks: Vec>, ) -> Self { ChainToPoolAndNetAdapter { @@ -1203,9 +1330,12 @@ impl pool::BlockChain for PoolToChainAdapter { } fn replay_attack_check(&self, tx: &Transaction) -> Result<(), pool::PoolError> { - self.chain() - .replay_attack_check(tx) - .map_err(|_| pool::PoolError::DuplicateKernelOrDuplicateSpent) + self.chain().replay_attack_check(tx).map_err(|e| { + pool::PoolError::DuplicateKernelOrDuplicateSpent(format!( + "Replay attack detected, {}", + e + )) + }) } } diff --git a/servers/src/common/hooks.rs b/servers/src/common/hooks.rs index 25efd09e30..feb587a37a 100644 --- a/servers/src/common/hooks.rs +++ b/servers/src/common/hooks.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/servers/src/common/stats.rs b/servers/src/common/stats.rs index 2347605256..7210181542 100644 --- a/servers/src/common/stats.rs +++ b/servers/src/common/stats.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,11 +16,11 @@ //! to collect information about server status use crate::util::RwLock; +use atomic_float::AtomicF64; use std::sync::atomic::*; use std::sync::Arc; use std::time::SystemTime; -use crate::core::consensus::graph_weight; use crate::core::core::hash::Hash; use crate::core::ser::ProtocolVersion; @@ -28,6 +28,7 @@ use chrono::prelude::*; use crate::chain::SyncStatus; use crate::p2p; +use crate::p2p::Capabilities; use grin_core::pow::Difficulty; /// Server state info collection struct, to be passed around into internals @@ -47,7 +48,7 @@ impl Default for ServerStateInfo { } /// Simpler thread-unaware version of above to be populated and returned to /// consumers might be interested in, such as test results or UI -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct ServerStats { /// Number of peers pub peer_count: u32, @@ -129,14 +130,20 @@ pub struct StratumStats { pub block_height: AtomicU64, /// current network difficulty we're working on pub network_difficulty: AtomicU64, - /// cuckoo size used for mining + /// cuckoo size of last share submitted pub edge_bits: AtomicU16, + /// Number of blocks found by all workers + pub blocks_found: AtomicUsize, + /// current network Hashrate (for edge_bits) + pub network_hashrate: atomic_float::AtomicF64, + /// The minimum acceptable share difficulty to request from miners + pub minimum_share_difficulty: AtomicU64, /// Individual worker status worker_stats: RwLock>, } /// Stats on the last WINDOW blocks and the difficulty calculation -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct DiffStats { /// latest height pub height: u64, @@ -192,6 +199,8 @@ pub struct PeerStats { pub sent_bytes_per_sec: u64, /// Number of bytes we've received from the peer. pub received_bytes_per_sec: u64, + /// Peer advertised capability flags. 
+ pub capabilities: Capabilities, } impl PartialEq for PeerStats { @@ -213,16 +222,9 @@ impl PartialEq for DiffBlock { } impl StratumStats { - /// Calculate network hashrate - pub fn network_hashrate(&self, height: u64) -> f64 { - 42.0 * (self.network_difficulty.load(Ordering::Relaxed) as f64 - / graph_weight(height, self.edge_bits.load(Ordering::Relaxed) as u8) as f64) - / 60.0 - } - /// Allocate a new slot for the worker. Assuming that the caller will never fail. /// returns worker Id for the Worker list - pub fn allocate_new_worker(&self) -> usize { + pub fn allocate_new_worker(&self, pow_difficulty: u64) -> usize { let mut worker_stats = self.worker_stats.write(); let worker_id = worker_stats @@ -233,7 +235,7 @@ impl StratumStats { let mut stats = WorkerStats::default(); stats.is_connected = true; stats.id = worker_id.to_string(); - stats.pow_difficulty = 1; + stats.pow_difficulty = pow_difficulty; if worker_id < worker_stats.len() { worker_stats[worker_id] = stats; @@ -289,8 +291,9 @@ impl PeerStats { height: peer.info.height(), direction: direction.to_string(), last_seen: peer.info.last_seen(), - sent_bytes_per_sec: peer.last_min_sent_bytes().unwrap_or(0) / 60, - received_bytes_per_sec: peer.last_min_received_bytes().unwrap_or(0) / 60, + sent_bytes_per_sec: peer.tracker().sent_bytes.read().bytes_per_min() / 60, + received_bytes_per_sec: peer.tracker().received_bytes.read().bytes_per_min() / 60, + capabilities: peer.info.capabilities, } } } @@ -318,8 +321,11 @@ impl Default for StratumStats { is_running: AtomicBool::new(false), num_workers: AtomicUsize::new(0), block_height: AtomicU64::new(0), - network_difficulty: AtomicU64::new(1000), - edge_bits: AtomicU16::new(29), + network_difficulty: AtomicU64::new(0), + edge_bits: AtomicU16::new(0), + blocks_found: AtomicUsize::new(0), + network_hashrate: AtomicF64::new(0.0), + minimum_share_difficulty: AtomicU64::new(1), worker_stats: RwLock::new(Vec::new()), } } diff --git a/servers/src/common/types.rs b/servers/src/common/types.rs index efdce326fa..892064770b 100644 --- a/servers/src/common/types.rs +++ b/servers/src/common/types.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -28,53 +28,52 @@ use crate::p2p; use crate::pool; use crate::pool::types::DandelionConfig; use crate::store; -use failure::Fail; use std::collections::HashSet; /// Error type wrapping underlying module errors. -#[derive(Debug, Fail)] +#[derive(Debug, thiserror::Error)] pub enum Error { /// Error originating from the core implementation. - #[fail(display = "Core error, {}", _0)] + #[error("Core error, {0}")] Core(core::block::Error), /// Error originating from the libtx implementation. - #[fail(display = "LibTx error, {}", _0)] + #[error("LibTx error, {0}")] LibTx(libtx::Error), /// Error originating from the db storage. - #[fail(display = "Db Store error, {}", _0)] + #[error("Db Store error, {0}")] Store(store::Error), /// Error originating from the blockchain implementation. - #[fail(display = "Blockchain error, {}", _0)] + #[error("Blockchain error, {0}")] Chain(chain::Error), /// Error originating from the peer-to-peer network. - #[fail(display = "P2P error, {}", _0)] + #[error("P2P error, {0}")] P2P(p2p::Error), /// Error originating from HTTP API calls.
- #[fail(display = "Http API error, {}", _0)] + #[error("Http API error, {0}")] API(api::Error), /// Error originating from the cuckoo miner - #[fail(display = "Cuckoo miner error, {}", _0)] + #[error("Cuckoo miner error, {0}")] Cuckoo(pow::Error), /// Error originating from the transaction pool. - #[fail(display = "Tx Pool error, {}", _0)] + #[error("Tx Pool error, {0}")] Pool(pool::PoolError), /// Error originating from the keychain. - #[fail(display = "Keychain error, {}", _0)] + #[error("Keychain error, {0}")] Keychain(keychain::Error), /// Invalid Arguments. - #[fail(display = "Invalid argument, {}", _0)] + #[error("Invalid argument, {0}")] ArgumentError(String), /// Wallet communication error - #[fail(display = "Wallet coomunication error, {}", _0)] + #[error("Wallet communication error, {0}")] WalletComm(String), /// Error originating from some I/O operation (likely a file on disk). - #[fail(display = "IO error, {}", _0)] + #[error("IO error, {0}")] IOError(std::io::Error), /// Configuration error - #[fail(display = "Configuration error, {}", _0)] + #[error("Configuration error, {0}")] Configuration(String), /// General error - #[fail(display = "General error, {}", _0)] + #[error("General error, {0}")] General(String), } @@ -219,10 +218,6 @@ pub struct ServerConfig { /// (Default: 50 ms) pub duration_sync_long: Option, - /// Header cache size - /// Set to 0 for now - pub header_cache_size: Option<u64>, - /// Invalid Block hash list /// (Default: none) pub invalid_block_hashes: Option>, @@ -289,7 +284,6 @@ impl Default for ServerConfig { chain_validation_mode: ChainValidationMode::default(), pool_config: pool::PoolConfig::default(), skip_sync_wait: Some(false), - header_cache_size: Some(0), invalid_block_hashes: Some(vec![]), duration_sync_short: Some(30), duration_sync_long: Some(50), @@ -493,11 +487,11 @@ impl DandelionEpoch { /// Choose a new outbound stem relay peer. pub fn next_epoch(&mut self, peers: &Arc<p2p::Peers>) { self.start_time = Some(Utc::now().timestamp()); - self.relay_peer = peers.outgoing_connected_peers().first().cloned(); + self.relay_peer = peers.iter().outbound().connected().choose_random(); // If stem_probability == 90 then we stem 90% of the time. - let mut rng = rand::thread_rng(); let stem_probability = self.config.stem_probability; + let mut rng = rand::thread_rng(); self.is_stem = rng.gen_range(0, 100) < stem_probability; let addr = self.relay_peer.clone().map(|p| p.info.addr.clone()); @@ -534,7 +528,7 @@ impl DandelionEpoch { } if update_relay { - self.relay_peer = peers.outgoing_connected_peers().first().cloned(); + self.relay_peer = peers.iter().outbound().connected().choose_random(); info!( "DandelionEpoch: relay_peer: new peer chosen: {:?}", self.relay_peer.clone().map(|p| p.info.addr.clone()) diff --git a/servers/src/error.rs b/servers/src/error.rs index 77433e9e8e..7469690c20 100644 --- a/servers/src/error.rs +++ b/servers/src/error.rs @@ -15,122 +15,52 @@ //! Implementation specific error types use crate::util::secp; use crate::util::OnionV3AddressError; -use failure::{Backtrace, Context, Fail}; -use std::env; -use std::fmt::{self, Display}; /// Error definition -#[derive(Debug)] -pub struct Error { - /// Inner Error - pub inner: Context<ErrorKind>, -} - /// Wallet errors, mostly wrappers around underlying crypto or I/O errors.
-#[derive(Clone, Eq, PartialEq, Debug, Fail)] -pub enum ErrorKind { +#[derive(Clone, Eq, PartialEq, Debug, thiserror::Error)] +pub enum Error { /// Tor Configuration Error - #[fail(display = "Tor Config Error: {}", _0)] + #[error("Tor Config Error: {0}")] TorConfig(String), /// Tor Process error - #[fail(display = "Tor Process Error: {}", _0)] + #[error("Tor Process Error: {0}")] TorProcess(String), /// Onion V3 Address Error - #[fail(display = "Onion V3 Address Error")] + #[error("Onion V3 Address Error")] OnionV3Address(OnionV3AddressError), /// Error when formatting json - #[fail(display = "IO error, {}", _0)] + #[error("IO error, {0}")] IO(String), /// Secp Error - #[fail(display = "Secp error, {}", _0)] + #[error("Secp error, {0}")] Secp(secp::Error), /// Generating ED25519 Public Key - #[fail(display = "Error generating ed25519 secret key: {}", _0)] + #[error("Error generating ed25519 secret key: {0}")] ED25519Key(String), /// Checking for onion address - #[fail(display = "Address is not an Onion v3 Address: {}", _0)] + #[error("Address is not an Onion v3 Address: {0}")] NotOnion(String), /// Generic Error - #[fail(display = "libp2p Error, {}", _0)] + #[error("libp2p Error, {0}")] LibP2P(String), } -impl Fail for Error { - fn cause(&self) -> Option<&dyn Fail> { - self.inner.cause() - } - - fn backtrace(&self) -> Option<&Backtrace> { - self.inner.backtrace() - } -} - -impl Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let show_bt = match env::var("RUST_BACKTRACE") { - Ok(r) => r == "1", - Err(_) => false, - }; - let backtrace = match self.backtrace() { - Some(b) => format!("{}", b), - None => String::from("Unknown"), - }; - let inner_output = format!("{}", self.inner,); - let backtrace_output = format!("\nBacktrace: {}", backtrace); - let mut output = inner_output; - if show_bt { - output.push_str(&backtrace_output); - } - Display::fmt(&output, f) - } -} - -impl Error { - /// get kind - pub fn kind(&self) -> ErrorKind { - self.inner.get_context().clone() - } - /// get cause - pub fn cause(&self) -> Option<&dyn Fail> { - self.inner.cause() - } - /// get backtrace - pub fn backtrace(&self) -> Option<&Backtrace> { - self.inner.backtrace() - } -} - -impl From<ErrorKind> for Error { - fn from(kind: ErrorKind) -> Error { - Error { - inner: Context::new(kind), - } - } -} - -impl From<Context<ErrorKind>> for Error { - fn from(inner: Context<ErrorKind>) -> Error { - Error { inner: inner } - } -} - impl From<secp::Error> for Error { fn from(error: secp::Error) -> Error { - Error { - inner: Context::new(ErrorKind::Secp(error)), - } + Error::Secp(error) } } impl From<OnionV3AddressError> for Error { fn from(error: OnionV3AddressError) -> Error { - Error::from(ErrorKind::OnionV3Address(error)) + Error::OnionV3Address(error) } } diff --git a/servers/src/grin.rs b/servers/src/grin.rs index b9c8977511..ac6a70fdfd 100644 --- a/servers/src/grin.rs +++ b/servers/src/grin.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/servers/src/grin/dandelion_monitor.rs b/servers/src/grin/dandelion_monitor.rs index 8b83dce12a..01f1c4666a 100644 --- a/servers/src/grin/dandelion_monitor.rs +++ b/servers/src/grin/dandelion_monitor.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
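To make the error-handling migration above concrete: with failure, the public error was a wrapper struct around a Context<ErrorKind>, and call sites had to go through e.kind(); with thiserror, the enum itself is the error type and call sites match on it directly. A minimal, self-contained sketch of the new shape (illustrative only, not the exact set of variants; assumes only the thiserror crate):

    use thiserror::Error;

    #[derive(Clone, Eq, PartialEq, Debug, Error)]
    pub enum Error {
        /// Tor Configuration Error
        #[error("Tor Config Error: {0}")]
        TorConfig(String),
        /// Error when formatting json
        #[error("IO error, {0}")]
        IO(String),
    }

    fn report(e: Error) {
        // No more e.kind(): match the enum directly; the #[error(...)]
        // attribute derives the Display implementation used by "{}".
        match e {
            Error::TorConfig(msg) => eprintln!("tor config: {}", msg),
            other => eprintln!("{}", other),
        }
    }
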
@@ -21,10 +21,9 @@ use std::time::{Duration, Instant}; use crate::common::adapters::DandelionAdapter; use crate::core::core::hash::Hashed; use crate::core::core::transaction; -use crate::core::core::verifier_cache::VerifierCache; use crate::pool::{BlockChain, DandelionConfig, Pool, PoolEntry, PoolError, TxSource}; use crate::util::StopState; -use crate::{ServerTxPool, ServerVerifierCache}; +use crate::ServerTxPool; /// A process to monitor transactions in the stempool. /// With Dandelion, a transaction can be broadcast in stem or fluff phase. @@ -38,7 +37,6 @@ pub fn monitor_transactions( dandelion_config: DandelionConfig, tx_pool: ServerTxPool, adapter: Arc<dyn DandelionAdapter>, - verifier_cache: ServerVerifierCache, stop_state: Arc<StopState>, ) -> std::io::Result<thread::JoinHandle<()>> { debug!("Started Dandelion transaction monitor."); @@ -58,15 +56,11 @@ pub fn monitor_transactions( if last_run.elapsed() > run_interval { if !adapter.is_stem() { - let _ = process_fluff_phase( - &dandelion_config, - &tx_pool, - &adapter, - &verifier_cache, - ) - .map_err(|e| { - error!("dand_mon: Problem processing fluff phase. {}", e); - }); + let _ = process_fluff_phase(&dandelion_config, &tx_pool, &adapter).map_err( + |e| { + error!("dand_mon: Problem processing fluff phase. {}", e); + }, + ); } // Now find all expired entries based on embargo timer. @@ -91,10 +85,9 @@ pub fn monitor_transactions( // Query the pool for transactions older than the cutoff. // Used for both periodic fluffing and handling expired embargo timer. -fn select_txs_cutoff<B, V>(pool: &Pool<B, V>, cutoff_secs: u16) -> Vec<PoolEntry> +fn select_txs_cutoff<B>(pool: &Pool<B>, cutoff_secs: u16) -> Vec<PoolEntry> where B: BlockChain, - V: VerifierCache, { let cutoff = Utc::now().timestamp() - cutoff_secs as i64; pool.entries @@ -108,7 +101,6 @@ fn process_fluff_phase( dandelion_config: &DandelionConfig, tx_pool: &ServerTxPool, adapter: &Arc<dyn DandelionAdapter>, - verifier_cache: &ServerVerifierCache, ) -> Result<(), PoolError> { // Take a write lock on the txpool for the duration of this processing. let mut tx_pool = tx_pool.write(); @@ -147,10 +139,7 @@ fn process_fluff_phase( ); let agg_tx = transaction::aggregate(&fluffable_txs)?; - agg_tx.validate( - transaction::Weighting::AsTransaction, - verifier_cache.clone(), - )?; + agg_tx.validate(transaction::Weighting::AsTransaction, header.height)?; tx_pool.add_to_pool(TxSource::Fluff, agg_tx, false, &header)?; Ok(()) diff --git a/servers/src/grin/seed.rs b/servers/src/grin/seed.rs index 06c14ae455..d86b10eb9b 100644 --- a/servers/src/grin/seed.rs +++ b/servers/src/grin/seed.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,11 +17,12 @@ //! configurable with either no peers, a user-defined list or a preset //! list of DNS records (the default).
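A brief aside on the dandelion monitor above: select_txs_cutoff simply keeps pool entries older than the configured number of seconds, and the same selection drives both periodic fluffing and embargo expiry. A hedged sketch of that predicate (the Entry type and its tx_at field are illustrative stand-ins for the real PoolEntry layout):

    use chrono::{DateTime, Utc};

    struct Entry {
        // Time the entry entered the pool (stand-in for PoolEntry's timestamp).
        tx_at: DateTime<Utc>,
    }

    // Keep only entries that entered the pool more than cutoff_secs ago.
    fn select_txs_cutoff(entries: &[Entry], cutoff_secs: u16) -> Vec<&Entry> {
        let cutoff = Utc::now().timestamp() - cutoff_secs as i64;
        entries
            .iter()
            .filter(|e| e.tx_at.timestamp() < cutoff)
            .collect()
    }
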
+use crate::core::pow::Difficulty; use chrono::prelude::{DateTime, Utc}; -use chrono::{Duration, MIN_DATE}; +use chrono::Duration; use grin_p2p::PeerAddr::Onion; -use rand::seq::SliceRandom; -use rand::thread_rng; +use grin_p2p::{msg::PeerAddrs, P2PConfig}; +use rand::prelude::*; use std::collections::HashMap; use std::net::ToSocketAddrs; use std::sync::{mpsc, Arc}; @@ -37,14 +38,10 @@ use crate::util::StopState; pub fn connect_and_monitor( p2p_server: Arc<p2p::Server>, - capabilities: p2p::Capabilities, seed_list: Box<dyn Fn() -> Vec<PeerAddr> + Send>, - preferred_peers: &[PeerAddr], + config: P2PConfig, stop_state: Arc<StopState>, - header_cache_size: u64, ) -> std::io::Result<thread::JoinHandle<()>> { - let preferred_peers = preferred_peers.to_vec(); - thread::Builder::new() .name("seed".to_string()) .spawn(move || { @@ -57,25 +54,27 @@ pub fn connect_and_monitor( let seed_list = seed_list(); // check seeds first - connect_to_seeds_and_preferred_peers( + connect_to_seeds_and_peers( peers.clone(), tx.clone(), seed_list.clone(), - &preferred_peers, + config.clone(), ); libp2p_connection::set_seed_list(&seed_list, true); - let mut prev = MIN_DATE.and_hms(0, 0, 0); - let mut prev_expire_check = MIN_DATE.and_hms(0, 0, 0); + let mut prev = DateTime::<Utc>::MIN_UTC; + let mut prev_expire_check = DateTime::<Utc>::MIN_UTC; + let mut prev_ping = Utc::now(); let mut start_attempt = 0; let mut connecting_history: HashMap<PeerAddr, DateTime<Utc>> = HashMap::new(); + loop { if stop_state.is_stopped() { break; } - let peer_count = peers.all_peers().len(); + let peer_count = peers.all_peer_data().len(); // Pause egress peer connection request. Only for tests. if stop_state.is_paused() { thread::sleep(time::Duration::from_secs(1)); @@ -85,16 +84,16 @@ pub fn connect_and_monitor( let connected_peers = if peer_count == 0 { 0 } else { - peers.connected_peers().len() + peers.iter().connected().count() }; if connected_peers == 0 { info!("No peers connected, trying to reconnect to seeds!"); - connect_to_seeds_and_preferred_peers( + connect_to_seeds_and_peers( peers.clone(), tx.clone(), seed_list.clone(), - &preferred_peers, + config.clone(), ); thread::sleep(time::Duration::from_secs(1)); @@ -116,14 +115,10 @@ pub fn connect_and_monitor( listen_for_addrs( peers.clone(), p2p_server.clone(), - capabilities, &rx, &mut connecting_history, - header_cache_size, connect_all, ); - prev = Utc::now(); - start_attempt = cmp::min(6, start_attempt + 1); if peer_count != 0 && connected_peers != 0 { connect_all = false; @@ -139,12 +134,10 @@ pub fn connect_and_monitor( } // monitor additional peers if we need to add more - monitor_peers( - peers.clone(), - p2p_server.config.clone(), - tx.clone(), - &preferred_peers, - ); + monitor_peers(peers.clone(), p2p_server.config.clone(), tx.clone()); + + prev = Utc::now(); + start_attempt = cmp::min(6, start_attempt + 1); } if peer_count == 0 { @@ -169,20 +162,15 @@ pub fn connect_and_monitor( }) } -fn monitor_peers( - peers: Arc<p2p::Peers>, - config: p2p::P2PConfig, - tx: mpsc::Sender<PeerAddr>, - preferred_peers: &[PeerAddr], -) { - // regularly check if we need to acquire more peers and if so, gets +fn monitor_peers(peers: Arc<p2p::Peers>, config: p2p::P2PConfig, tx: mpsc::Sender<PeerAddr>) { + // regularly check if we need to acquire more peers and if so, get // them from db - let total_count = peers.all_peers().len(); + let mut total_count = 0; let mut healthy_count = 0; let mut banned_count = 0; let mut defuncts = vec![]; - for x in peers.all_peers() { + for x in peers.all_peer_data().into_iter() { match x.flags { p2p::State::Banned => { let interval = Utc::now().timestamp() - x.last_banned; @@ -202,15 +190,21 @@ fn
monitor_peers( p2p::State::Healthy => healthy_count += 1, p2p::State::Defunct => defuncts.push(x), } + total_count += 1; } + let peers_iter = || peers.iter().connected(); + let peers_count = peers_iter().count(); + let max_diff = peers_iter().max_difficulty().unwrap_or(Difficulty::zero()); + let most_work_count = peers_iter().with_difficulty(|x| x >= max_diff).count(); + debug!( "monitor_peers: on {}:{}, {} connected ({} most_work). \ all {} = {} healthy + {} banned + {} defunct", config.host, config.port, - peers.peer_count(), - peers.most_work_peers().len(), + peers_count, + most_work_count, total_count, healthy_count, banned_count, @@ -221,17 +215,21 @@ fn monitor_peers( peers.clean_peers( config.peer_max_inbound_count() as usize, config.peer_max_outbound_count() as usize, - preferred_peers, + config.clone(), ); if peers.enough_outbound_peers() { return; } - // loop over connected peers + // loop over connected peers that can provide peer lists // ask them for their list of peers let mut connected_peers: Vec<PeerAddr> = vec![]; - for p in peers.connected_peers() { + for p in peers + .iter() + .with_capabilities(p2p::Capabilities::PEER_LIST) + .connected() + { trace!( "monitor_peers: {}:{} ask {} for more peers", config.host, @@ -243,21 +241,21 @@ fn monitor_peers( } // Attempt to connect to any preferred peers. - for p in preferred_peers { + let peers_preferred = config.peers_preferred.unwrap_or(PeerAddrs::default()); + for p in peers_preferred { if !connected_peers.is_empty() { - if !connected_peers.contains(p) { - tx.send(p.clone()).unwrap(); + if !connected_peers.contains(&p) { + let _ = tx.send(p); } } else { - tx.send(p.clone()).unwrap(); + let _ = tx.send(p); } } - // take a random defunct peer and mark it healthy: over a long period any + // take a random defunct peer and mark it healthy: over a long enough period any // peer will see another as defunct eventually, which gives us a chance to retry - if !defuncts.is_empty() { - defuncts.shuffle(&mut thread_rng()); - let _ = peers.update_state(defuncts[0].addr.clone(), p2p::State::Healthy); + if let Some(peer) = defuncts.into_iter().choose(&mut thread_rng()) { + let _ = peers.update_state(peer.addr, p2p::State::Healthy); } // find some peers from our db @@ -284,33 +282,50 @@ fn monitor_peers( // Check if we have any pre-existing peer in db. If so, start with those, // otherwise use the seeds provided. -fn connect_to_seeds_and_preferred_peers( +fn connect_to_seeds_and_peers( peers: Arc<p2p::Peers>, tx: mpsc::Sender<PeerAddr>, seed_list: Vec<PeerAddr>, - peers_preferred: &[PeerAddr], + config: P2PConfig, ) { + let peers_deny = config.peers_deny.unwrap_or(PeerAddrs::default()); + + // If "peers_allow" is explicitly configured then just use this list + // remembering to filter out "peers_deny". + if let Some(peers) = config.peers_allow { + for addr in peers.difference(peers_deny.as_slice()) { + let _ = tx.send(addr); + } + return; + } + + // Always try our "peers_preferred" remembering to filter out "peers_deny".
+ if let Some(peers) = config.peers_preferred { + for addr in peers.difference(peers_deny.as_slice()) { + let _ = tx.send(addr); + } + } + // check if we have some peers in db // look for peers that are able to give us other peers (via PEER_LIST capability) let peers = peers.find_peers(p2p::State::Healthy, p2p::Capabilities::PEER_LIST, 100); // if so, get their addresses, otherwise use our seeds - let mut peer_addrs = if peers.len() > 3 { + let peer_addrs = if peers.len() > 3 { peers.iter().map(|p| p.addr.clone()).collect::<Vec<_>>() } else { seed_list }; - // If we have preferred peers add them to the initial list - peer_addrs.extend_from_slice(peers_preferred); - if peer_addrs.is_empty() { warn!("No seeds were retrieved."); } - // connect to this first set of addresses + // connect to this initial set of peer addresses (either seeds or from our local db). for addr in peer_addrs { - tx.send(addr).unwrap(); + if !peers_deny.as_slice().contains(&addr) { + let _ = tx.send(addr); + } } } @@ -320,10 +335,8 @@ fn connect_to_seeds_and_preferred_peers( fn listen_for_addrs( peers: Arc<p2p::Peers>, p2p: Arc<p2p::Server>, - capab: p2p::Capabilities, rx: &mpsc::Receiver<PeerAddr>, connecting_history: &mut HashMap<PeerAddr, DateTime<Utc>>, - header_cache_size: u64, attempt_all: bool, ) { // Pull everything currently on the queue off the queue. @@ -333,7 +346,7 @@ fn listen_for_addrs( let mut addrs: Vec<PeerAddr> = rx.try_iter().collect(); if attempt_all { - for x in peers.all_peers() { + for x in peers.all_peer_data() { match x.flags { p2p::State::Banned => {} _ => { @@ -347,6 +360,7 @@ if peers.enough_outbound_peers() { return; } + // Note: We drained the rx queue earlier to keep it under control. // Even if there are many addresses to try we will only try a bounded number of them for safety. let connect_min_interval = 30; @@ -366,7 +380,6 @@ *history = now; } } - connecting_history.insert(addr.clone(), now); let peers_c = peers.clone(); @@ -386,20 +399,25 @@ }; if update_possible { - match p2p_c.connect(addr.clone(), header_cache_size) { + match p2p_c.connect(addr.clone()) { Ok(p) => { - debug!("Sending peer request to {}", addr); - if p.send_peer_request(capab).is_ok() { - match addr { - PeerAddr::Onion(_) => { - if let Err(_) = libp2p_connection::add_new_peer(&addr) { - error!("Unable to add libp2p peer {}", addr); + // If a peer advertises PEER_LIST then ask it for more peers that support PEER_LIST. + // We want to build a local db of possible peers to connect to. + // We do not necessarily care (at this point in time) what other capabilities these peers support.
+ if p.info.capabilities.contains(p2p::Capabilities::PEER_LIST) { + debug!("Sending peer request to {}", addr); + if p.send_peer_request(p2p::Capabilities::PEER_LIST).is_ok() { + match addr { + PeerAddr::Onion(_) => { + if let Err(_) = libp2p_connection::add_new_peer(&addr) { + error!("Unable to add libp2p peer {}", addr); + } } - } - _ => (), - }; - let _ = peers_c.update_state(addr, p2p::State::Healthy); + _ => (), + }; + } } + let _ = peers_c.update_state(addr, p2p::State::Healthy); } Err(e) => { debug!("Connection to the peer {} was rejected, {}", addr, e); @@ -453,7 +471,8 @@ pub fn default_dns_seeds() -> Box<dyn Fn() -> Vec<PeerAddr> + Send> { }) } -fn resolve_dns_to_addrs(dns_records: &Vec<String>) -> Vec<PeerAddr> { +/// Convenience function to resolve dns addresses from DNS records +pub fn resolve_dns_to_addrs(dns_records: &Vec<String>) -> Vec<PeerAddr> { let mut addresses: Vec<PeerAddr> = vec![]; for dns in dns_records { if dns.ends_with(".onion") { diff --git a/servers/src/grin/server.rs b/servers/src/grin/server.rs index 1dbf11770b..6f08b6c5fb 100644 --- a/servers/src/grin/server.rs +++ b/servers/src/grin/server.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,7 +18,6 @@ use crate::tor::config as tor_config; use crate::util::secp; -use std::fs; use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; @@ -27,15 +26,14 @@ use std::sync::mpsc; use std::sync::mpsc::Receiver; use std::sync::mpsc::Sender; use std::sync::Arc; +use std::{convert::TryInto, fs}; use std::{ thread::{self, JoinHandle}, time::{self, Duration}, }; -use crate::ErrorKind; - use fs2::FileExt; -use grin_util::{to_hex, OnionV3Address}; +use grin_util::{static_secp_instance, to_hex, OnionV3Address}; use walkdir::WalkDir; use crate::api; @@ -48,10 +46,8 @@ use crate::common::hooks::{init_chain_hooks, init_net_hooks}; use crate::common::stats::{ ChainStats, DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats, TxStats, }; - use crate::common::types::{Error, ServerConfig, StratumServerConfig}; -use crate::core::core::hash::Hashed; -use crate::core::core::verifier_cache::LruVerifierCache; +use crate::core::core::hash::{Hashed, ZERO_HASH}; use crate::core::ser::ProtocolVersion; use crate::core::stratum::connections; use crate::core::{consensus, genesis, global, pow}; @@ -73,16 +69,14 @@ use std::sync::atomic::Ordering; use crate::p2p::libp2p_connection; use chrono::Utc; use grin_core::core::TxKernel; +use grin_p2p::Capabilities; use grin_util::from_hex; use grin_util::secp::constants::SECRET_KEY_SIZE; use grin_util::secp::pedersen::Commitment; use std::collections::HashMap; /// Arcified thread-safe TransactionPool with type parameters used by server components -pub type ServerTxPool = - Arc<RwLock<pool::TransactionPool<PoolToChainAdapter, PoolToNetAdapter, LruVerifierCache>>>; -/// Arcified thread-safe LruVerifierCache -pub type ServerVerifierCache = Arc<RwLock<LruVerifierCache>>; +pub type ServerTxPool = Arc<RwLock<pool::TransactionPool<PoolToChainAdapter, PoolToNetAdapter>>>; /// Grin server holding internal structures. pub struct Server { @@ -94,9 +88,6 @@ pub struct Server { pub chain: Arc<chain::Chain>, /// in-memory transaction pool pub tx_pool: ServerTxPool, - /// Shared cache for verification results when - /// verifying rangeproof and kernel signatures.
- verifier_cache: ServerVerifierCache, /// Whether we're currently syncing pub sync_state: Arc<SyncState>, /// To be passed around to collect stats and info pub state_info: ServerStateInfo, @@ -219,7 +210,6 @@ impl Server { stop_state: Option<Arc<StopState>>, api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>), ) -> Result<Server, Error> { - let header_cache_size = config.header_cache_size.unwrap_or(25_000); //let duration_sync_long = config.duration_sync_long.unwrap_or(150); //let duration_sync_short = config.duration_sync_short.unwrap_or(100); @@ -245,16 +235,11 @@ impl Server { Arc::new(StopState::new()) }; - // Shared cache for verification results. - // We cache rangeproof verification and kernel signature verification. - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); - let pool_adapter = Arc::new(PoolToChainAdapter::new()); let pool_net_adapter = Arc::new(PoolToNetAdapter::new(config.dandelion_config.clone())); let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new( config.pool_config.clone(), pool_adapter.clone(), - verifier_cache.clone(), pool_net_adapter.clone(), ))); @@ -279,7 +264,6 @@ impl Server { chain_adapter.clone(), genesis.clone(), pow::verify_size, - verifier_cache.clone(), archive_mode, )?); @@ -289,17 +273,10 @@ impl Server { sync_state.clone(), shared_chain.clone(), tx_pool.clone(), - verifier_cache.clone(), config.clone(), init_net_hooks(&config), )); - // we always support tor, so don't rely on config. This fixes - // the problem of old config files - // only for capabilities params, doesn't mean - // tor _MUST_ be on. - let capab = config.p2p_config.capabilities | p2p::Capabilities::TOR_ADDRESS; - api::reset_server_onion_address(); let (onion_address, tor_secret) = if config.tor_config.tor_enabled { @@ -352,7 +329,10 @@ impl Server { Err(e) => { input.send(None).unwrap(); error!("failed to start Tor due to {}", e); - Err(ErrorKind::TorConfig(format!("Failed to init tor, {}", e))) + Err(crate::Error::TorConfig(format!( + "Failed to init tor, {}", + e + ))) } }; })?; @@ -513,9 +493,18 @@ impl Server { })?; } + // Initialize our capabilities. + // Currently either "default" or with optional "archive_mode" (block history) support enabled.
+ let capabilities = if let Some(true) = config.archive_mode { + Capabilities::default() | Capabilities::BLOCK_HIST + } else { + Capabilities::default() + }; + debug!("Capabilities: {:?}", capabilities); + let p2p_server = Arc::new(p2p::Server::new( &config.db_root, - capab, + capabilities, config.p2p_config.clone(), net_adapter.clone(), genesis.hash(), @@ -532,7 +521,7 @@ let mut connect_thread = None; if config.p2p_config.seeding_type != p2p::Seeding::Programmatic { - let seeder = match config.p2p_config.seeding_type { + let seed_list = match config.p2p_config.seeding_type { p2p::Seeding::None => { warn!("No seed configured, will stay solo until connected to"); seed::predefined_seeds(vec![]) @@ -549,18 +538,11 @@ _ => unreachable!(), }; - let preferred_peers = match &config.p2p_config.peers_preferred { - Some(addrs) => addrs.peers.clone(), - None => vec![], - }; - connect_thread = Some(seed::connect_and_monitor( p2p_server.clone(), - config.p2p_config.capabilities, - seeder, - &preferred_peers, + seed_list, + config.p2p_config.clone(), stop_state.clone(), - header_cache_size, )?); } @@ -580,7 +562,7 @@ let _ = thread::Builder::new() .name("p2p-server".to_string()) .spawn(move || { - if let Err(e) = p2p_inner.listen(header_cache_size) { + if let Err(e) = p2p_inner.listen() { error!("P2P server failed with error: {:?}", e); } })?; @@ -623,7 +605,6 @@ config.dandelion_config.clone(), tx_pool.clone(), pool_net_adapter, - verifier_cache.clone(), stop_state.clone(), )?; @@ -633,7 +614,6 @@ p2p: p2p_server, chain: shared_chain, tx_pool, - verifier_cache, sync_state, state_info: ServerStateInfo { ..Default::default() @@ -718,22 +698,30 @@ let scoped_vec; let mut existing_onion = None; let secret = if !found { - let sec_key = secp::key::SecretKey::new(&mut rand::thread_rng()); + let secp = static_secp_instance(); + let secp = secp.lock(); + + let sec_key = secp::key::SecretKey::new(&secp, &mut rand::thread_rng()); scoped_vec = vec![sec_key.clone()]; sec_key_vec = Some((scoped_vec).as_slice()); onion_address = OnionV3Address::from_private(&sec_key.0) - .map_err(|e| ErrorKind::TorConfig(format!("Unable to build onion address, {}", e))) + .map_err(|e| { + crate::Error::TorConfig(format!("Unable to build onion address, {}", e)) + }) .unwrap() .to_string(); sec_key } else { + let secp = static_secp_instance(); + let secp = secp.lock(); + existing_onion = Some(onion_address.clone()); // Read Secret from the file. - let sec = tor_config::read_sec_key_file(&format!( - "{}{}{}", - onion_service_dir, MAIN_SEPARATOR, onion_address - )) + let sec = tor_config::read_sec_key_file( - &format!("{}{}{}", onion_service_dir, MAIN_SEPARATOR, onion_address), + &secp, ) .map_err(|e| Error::General(format!("Unable to read tor secret, {}", e)))?; sec }; @@ -757,7 +745,7 @@ existing_onion, socks_port, ) - .map_err(|e| ErrorKind::TorConfig(format!("Failed to configure tor, {}", e).into())) + .map_err(|e| crate::Error::TorConfig(format!("Failed to configure tor, {}", e).into())) .unwrap(); info!( @@ -778,15 +766,20 @@ .launch(); if res.is_err() { - Err(Error::Configuration("Unable to start tor".to_string())) + Err(Error::Configuration(format!( + "Unable to start tor due to error: {}. Started with config: {}, working dir: {}", + res.err().unwrap(), + tor_path, + tor_dir + ))) } else { Ok((process, onion_address.to_string(), secret)) } } /// Asks the server to connect to a peer at the provided network address.
- pub fn connect_peer(&self, addr: PeerAddr, header_cache_size: u64) -> Result<(), Error> { - self.p2p.connect(addr, header_cache_size)?; + pub fn connect_peer(&self, addr: PeerAddr) -> Result<(), Error> { + self.p2p.connect(addr)?; Ok(()) } @@ -800,7 +793,13 @@ impl Server { /// Number of peers pub fn peer_count(&self) -> u32 { - self.p2p.peers.peer_count() + self.p2p + .peers + .iter() + .connected() + .count() + .try_into() + .unwrap() } /// Start a minimal "stratum" mining service on a separate thread @@ -810,7 +809,6 @@ impl Server { config: StratumServerConfig, ip_pool: Arc, ) { - let edge_bits = global::min_edge_bits(); let proof_size = global::proofsize(); let sync_state = self.sync_state.clone(); @@ -818,14 +816,13 @@ impl Server { config, self.chain.clone(), self.tx_pool.clone(), - self.verifier_cache.clone(), self.state_info.stratum_stats.clone(), ip_pool, ); let _ = thread::Builder::new() .name("stratum_server".to_string()) .spawn(move || { - stratum_server.run_loop(edge_bits as u32, proof_size, sync_state); + stratum_server.run_loop(proof_size, sync_state); }); } @@ -866,7 +863,6 @@ impl Server { config, self.chain.clone(), self.tx_pool.clone(), - self.verifier_cache.clone(), stop_state, sync_state, ); @@ -902,7 +898,7 @@ impl Server { // code clean. This may be handy for testing but not really needed // for release let diff_stats = { - let last_blocks: Vec = + let last_blocks: Vec = global::difficulty_data_to_vector(self.chain.difficulty_iter()?) .into_iter() .collect(); @@ -918,9 +914,11 @@ impl Server { height += 1; + let block_hash = next.hash.unwrap_or(ZERO_HASH); + DiffBlock { block_height: height, - block_hash: next.block_hash, + block_hash, difficulty: next.difficulty.to_num(), time: next.timestamp, duration: next.timestamp - prev.timestamp, @@ -944,7 +942,8 @@ impl Server { let peer_stats = self .p2p .peers - .connected_peers() + .iter() + .connected() .into_iter() .map(|p| PeerStats::from_peer(&p)) .collect(); diff --git a/servers/src/grin/sync.rs b/servers/src/grin/sync.rs index 2f01c286d5..50a9fe5919 100644 --- a/servers/src/grin/sync.rs +++ b/servers/src/grin/sync.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/servers/src/grin/sync/body_sync.rs b/servers/src/grin/sync/body_sync.rs index d77e634a01..c50a7ee6a9 100644 --- a/servers/src/grin/sync/body_sync.rs +++ b/servers/src/grin/sync/body_sync.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,11 +14,14 @@ use chrono::prelude::{DateTime, Utc}; use chrono::Duration; +use p2p::Capabilities; +use rand::prelude::*; use std::cmp; use std::sync::Arc; -use crate::chain::{self, SyncState, SyncStatus}; -use crate::core::core::hash::Hash; +use crate::chain::{self, SyncState, SyncStatus, Tip}; +use crate::core::core::hash::{Hash, Hashed}; +use crate::core::core::BlockHeader; use crate::p2p; pub struct BodySync { @@ -69,27 +72,58 @@ impl BodySync { Ok(false) } + /// Is our local node running in archive_mode? + fn archive_mode(&self) -> bool { + self.chain.archive_mode() + } + /// Return true if txhashset download is needed (when requested block is under the horizon). + /// Otherwise go request some missing blocks and return false. 
fn body_sync(&mut self) -> Result<bool, chain::Error> { - let mut hashes: Option<Vec<Hash>> = Some(vec![]); - let txhashset_needed = self - .chain - .check_txhashset_needed("body_sync".to_owned(), &mut hashes)?; + let head = self.chain.head()?; + let header_head = self.chain.header_head()?; + let fork_point = self.chain.fork_point()?; - if txhashset_needed { - debug!( + if self.chain.check_txhashset_needed(&fork_point)? { + trace!( "body_sync: cannot sync full blocks earlier than horizon. will request txhashset", ); return Ok(true); } - let mut hashes = hashes.ok_or_else(|| { - chain::ErrorKind::SyncError("Got no hashes for body sync".to_string()) - })?; + let peers = { + // Find connected peers with strictly greater difficulty than us. + let peers_iter = || { + // If we are running with archive mode enabled we only want to sync + // from other archive nodes. + let cap = if self.archive_mode() { + Capabilities::BLOCK_HIST + } else { + Capabilities::UNKNOWN + }; + + self.peers + .iter() + .with_capabilities(cap) + .with_difficulty(|x| x > head.total_difficulty) + .connected() + }; + + // We prefer outbound peers with greater difficulty. + let mut peers: Vec<_> = peers_iter().outbound().into_iter().collect(); + if peers.is_empty() { + debug!("no outbound peers with more work, considering inbound"); + peers = peers_iter().inbound().into_iter().collect(); + } - hashes.reverse(); + // If we have no peers (outbound or inbound) then we are done for now. + if peers.is_empty() { + debug!("no peers (inbound or outbound) with more work"); + return Ok(false); + } - let peers = self.peers.more_work_peers()?; + peers + }; // if we have 5 peers to sync from then ask for 50 blocks total (peer_count * // 10) max will be 80 if all 8 peers are advertising more work @@ -99,25 +133,14 @@ chain::MAX_ORPHAN_SIZE.saturating_sub(self.chain.orphans_len()) + 1, ); - let hashes_to_get = hashes - .iter() - .filter(|x| { - // only ask for blocks that we have not yet processed - // either successfully stored or in our orphan list - self.chain.get_block(x).is_err() && !self.chain.is_orphan(x) - }) - .take(block_count) - .collect::<Vec<_>>(); - - if !hashes_to_get.is_empty() { - let body_head = self.chain.head()?; - let header_head = self.chain.header_head()?; + let hashes = self.block_hashes_to_sync(&fork_point, &header_head, block_count as u64)?; + if !hashes.is_empty() { debug!( "block_sync: {}/{} requesting blocks {:?} from {} peers", - body_head.height, + head.height, header_head.height, - hashes_to_get, + hashes, peers.len(), ); @@ -125,10 +148,10 @@ self.blocks_requested = 0; self.receive_timeout = Utc::now() + Duration::seconds(6); - let mut peers_iter = peers.iter().cycle(); - for hash in hashes_to_get.clone() { - if let Some(peer) = peers_iter.next() { - if let Err(e) = peer.send_block_request(*hash, chain::Options::SYNC) { + let mut rng = rand::thread_rng(); + for hash in hashes { + if let Some(peer) = peers.choose(&mut rng) { + if let Err(e) = peer.send_block_request(hash, chain::Options::SYNC) { debug!("Skipped request to {}: {:?}", peer.info.addr, e); peer.stop(); } else { @@ -140,6 +163,25 @@ return Ok(false); } + fn block_hashes_to_sync( + &self, + fork_point: &BlockHeader, + header_head: &Tip, + count: u64, + ) -> Result<Vec<Hash>, chain::Error> { + let mut hashes = vec![]; + let max_height = cmp::min(fork_point.height + count, header_head.height); + let mut current = self.chain.get_header_by_height(max_height)?; + while current.height > fork_point.height { + if !self.chain.is_orphan(&current.hash()) {
hashes.push(current.hash()); + } + current = self.chain.get_previous_header(&current)?; + } + hashes.reverse(); + Ok(hashes) + } + // Should we run block body sync and ask for more full blocks? fn body_sync_due(&mut self) -> Result<bool, chain::Error> { let blocks_received = self.blocks_received()?; @@ -169,7 +211,7 @@ // off by one to account for broadcast adding a couple orphans if self.blocks_requested < 2 { // no pending block requests, ask more - debug!("body_sync: no pending block request, asking more"); + trace!("body_sync: no pending block request, asking more"); return Ok(true); } diff --git a/servers/src/grin/sync/header_sync.rs b/servers/src/grin/sync/header_sync.rs index ffcde6aef0..02a581e6d5 100644 --- a/servers/src/grin/sync/header_sync.rs +++ b/servers/src/grin/sync/header_sync.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,8 +18,9 @@ use std::sync::Arc; use crate::chain::{self, SyncState, SyncStatus}; use crate::common::types::Error; -use crate::core::core::hash::{Hash, Hashed}; +use crate::core::core::hash::Hash; +use crate::core::pow::Difficulty; -use crate::p2p::{self, types::ReasonForBan, Peer}; +use crate::p2p::{self, types::ReasonForBan, Capabilities, Peer}; pub struct HeaderSync { sync_state: Arc<SyncState>, @@ -46,56 +47,53 @@ } } - pub fn check_run( - &mut self, - header_head: &chain::Tip, - highest_height: u64, - ) -> Result<bool, chain::Error> { - if !self.header_sync_due(header_head) { - return Ok(false); - } - - let enable_header_sync = match self.sync_state.status() { + pub fn check_run(&mut self, sync_head: chain::Tip) -> Result<bool, chain::Error> { + // We only want to run header_sync for some sync states. + let do_run = match self.sync_state.status() { SyncStatus::BodySync { .. } | SyncStatus::HeaderSync { .. } - | SyncStatus::TxHashsetDone => true, - SyncStatus::NoSync | SyncStatus::Initial | SyncStatus::AwaitingPeers(_) => { - let sync_head = self.chain.get_sync_head()?; - debug!( - "sync: initial transition to HeaderSync. sync_head: {} at {}, resetting to: {} at {}", - sync_head.hash(), - sync_head.height, - header_head.hash(), - header_head.height, - ); - - // Reset sync_head to header_head on transition to HeaderSync, - // but ONLY on initial transition to HeaderSync state. - // - // The header_head and sync_head may diverge here in the presence of a fork - // in the header chain. Ensure we track the new advertised header chain here - // correctly, so reset any previous (and potentially stale) sync_head to match - // our last known "good" header_head. - // - self.chain.rebuild_sync_mmr(&header_head)?; - true - } + | SyncStatus::TxHashsetDone + | SyncStatus::NoSync + | SyncStatus::Initial + | SyncStatus::AwaitingPeers(_) => true, _ => false, }; - if enable_header_sync { + if !do_run { + return Ok(false); + } + + // TODO - can we safely reuse the peer here across multiple runs? + let sync_peer = self.choose_sync_peer(); + + if let Some(sync_peer) = sync_peer { + let (peer_height, peer_diff) = { + let info = sync_peer.info.live_info.read(); + (info.height, info.total_difficulty) + }; + + // Quick check - nothing to sync if we are caught up with the peer.
+ if peer_diff <= sync_head.total_difficulty { + return Ok(false); + } + + if !self.header_sync_due(sync_head) { + return Ok(false); + } + self.sync_state.update(SyncStatus::HeaderSync { - current_height: header_head.height, - highest_height: highest_height, + sync_head, + highest_height: peer_height, + highest_diff: peer_diff, }); - self.syncing_peer = self.header_sync(); - return Ok(true); + self.header_sync(sync_head, sync_peer.clone()); + self.syncing_peer = Some(sync_peer.clone()); } - Ok(false) + Ok(true) } - fn header_sync_due(&mut self, header_head: &chain::Tip) -> bool { + fn header_sync_due(&mut self, header_head: chain::Tip) -> bool { let now = Utc::now(); let (timeout, latest_height, prev_height) = self.prev_header_sync; @@ -168,40 +166,49 @@ } } - fn header_sync(&mut self) -> Option<Arc<Peer>> { - if let Ok(header_head) = self.chain.header_head() { - let difficulty = header_head.total_difficulty; + fn choose_sync_peer(&self) -> Option<Arc<Peer>> { + let peers_iter = || { + self.peers + .iter() + .with_capabilities(Capabilities::HEADER_HIST) + .connected() + }; - if let Some(peer) = self.peers.most_work_peer() { - if peer.info.total_difficulty() > difficulty { - return self.request_headers(peer); - } - } + // Filter peers further based on max difficulty. + let max_diff = peers_iter().max_difficulty().unwrap_or(Difficulty::zero()); + let peers_iter = || peers_iter().with_difficulty(|x| x >= max_diff); + + // Choose a random "most work" peer, preferring outbound if at all possible. + peers_iter().outbound().choose_random().or_else(|| { + debug!("no suitable outbound peer for header sync, considering inbound"); + peers_iter().inbound().choose_random() + }) + } + + fn header_sync(&self, sync_head: chain::Tip, peer: Arc<Peer>) { + if peer.info.total_difficulty() > sync_head.total_difficulty { + self.request_headers(sync_head, peer); } - return None; } /// Request some block headers from a peer to advance us. - fn request_headers(&mut self, peer: Arc<Peer>) -> Option<Arc<Peer>> { - if let Ok(locator) = self.get_locator() { + fn request_headers(&self, sync_head: chain::Tip, peer: Arc<Peer>) { + if let Ok(locator) = self.get_locator(sync_head) { debug!( "sync: request_headers: asking {} for headers, {:?}", peer.info.addr, locator, ); let _ = peer.send_header_request(locator); - return Some(peer); } - return None; } /// We build a locator based on sync_head. /// Even if sync_head is significantly out of date we will "reset" it once we /// start getting headers back from a peer. - fn get_locator(&mut self) -> Result<Vec<Hash>, Error> { - let tip = self.chain.get_sync_head()?; - let heights = get_locator_heights(tip.height); - let locator = self.chain.get_locator_hashes(&heights)?; + fn get_locator(&self, sync_head: chain::Tip) -> Result<Vec<Hash>, Error> { + let heights = get_locator_heights(sync_head.height); + let locator = self.chain.get_locator_hashes(sync_head, &heights)?; Ok(locator) } } diff --git a/servers/src/grin/sync/state_sync.rs b/servers/src/grin/sync/state_sync.rs index 5dc5c953ce..c6b7a7e265 100644 --- a/servers/src/grin/sync/state_sync.rs +++ b/servers/src/grin/sync/state_sync.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
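The select-a-sync-peer idiom above recurs in body sync, header sync and state sync, so it is worth spelling out once in isolation. A sketch of the pattern, assuming the peer-iterator combinators this change introduces (with_capabilities, with_difficulty, max_difficulty, outbound, inbound, connected, choose_random) and the Arc, Peer, Capabilities and Difficulty types already in scope above:

    // Pick a random "most work" peer with the required capability,
    // preferring outbound connections, falling back to inbound.
    fn choose_most_work_peer(peers: &Arc<p2p::Peers>) -> Option<Arc<Peer>> {
        // Rebuild the iterator for each pass; filter by capability first.
        let peers_iter = || {
            peers
                .iter()
                .with_capabilities(Capabilities::HEADER_HIST)
                .connected()
        };
        // Keep only peers advertising the maximum known difficulty.
        let max_diff = peers_iter().max_difficulty().unwrap_or(Difficulty::zero());
        let peers_iter = || peers_iter().with_difficulty(|d| d >= max_diff);
        // Randomize within the qualifying set so load spreads across peers.
        peers_iter()
            .outbound()
            .choose_random()
            .or_else(|| peers_iter().inbound().choose_random())
    }
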
@@ -14,12 +14,19 @@ use chrono::prelude::{DateTime, Utc}; use chrono::Duration; +use grin_core::core::hash::Hash; +use grin_core::core::BlockHeader; +use grin_p2p::ReasonForBan; +use grin_util::secp::rand::Rng; +use rand::seq::SliceRandom; use std::sync::Arc; -use crate::chain::{self, SyncState, SyncStatus}; -use crate::core::core::hash::Hashed; +use crate::chain::{self, pibd_params, SyncState, SyncStatus}; +use crate::core::core::{hash::Hashed, pmmr::segment::SegmentType}; use crate::core::global; -use crate::p2p::{self, Peer}; +use crate::core::pow::Difficulty; +use crate::p2p::{self, Capabilities, Peer}; +use crate::util::StopState; /// Fast sync has 3 "states": /// * syncing headers @@ -37,6 +44,12 @@ pub struct StateSync { last_logged_time: i64, last_download_size: u64, + + pibd_aborted: bool, + earliest_zero_pibd_peer_time: Option<DateTime<Utc>>, + + // The bitmap_output_root in use; in case of error we want to ban all related peers + output_bitmap_root_header_hash: Option<Hash>, } impl StateSync { @@ -53,6 +66,33 @@ state_sync_peer: None, last_logged_time: 0, last_download_size: 0, + pibd_aborted: false, + earliest_zero_pibd_peer_time: None, + output_bitmap_root_header_hash: None, + } + } + + /// Record earliest time at which we had no suitable + /// peers for continuing PIBD + pub fn set_earliest_zero_pibd_peer_time(&mut self, t: Option<DateTime<Utc>>) { + self.earliest_zero_pibd_peer_time = t; + } + + /// Flag to abort PIBD process within StateSync, intentionally separate from `sync_state`, + /// which can be reset between calls + pub fn set_pibd_aborted(&mut self) { + self.pibd_aborted = true; + } + + fn reset_chain(&mut self) { + if let Err(e) = self.chain.reset_pibd_head() { + error!("pibd_sync restart: reset pibd_head error = {}", e); + } + if let Err(e) = self.chain.reset_chain_head_to_genesis() { + error!("pibd_sync restart: chain reset to genesis error = {}", e); + } + if let Err(e) = self.chain.reset_prune_lists() { + error!("pibd_sync restart: reset prune lists error = {}", e); } } @@ -65,6 +105,7 @@ head: &chain::Tip, tail: &chain::Tip, highest_height: u64, + stop_state: Arc<StopState>, ) -> bool { trace!("state_sync: head.height: {}, tail.height: {}. header_head.height: {}, highest_height: {}", head.height, tail.height, header_head.height, highest_height, @@ -78,15 +119,61 @@ sync_need_restart = true; } + // Determine whether we're going to try using PIBD or whether we've already given up + // on it + let using_pibd = !matches!( + self.sync_state.status(), + SyncStatus::TxHashsetPibd { aborted: true, .. }, + ) && !self.pibd_aborted; + + // Check whether we've errored and should restart pibd + if using_pibd { + if let SyncStatus::TxHashsetPibd { errored: true, .. } = self.sync_state.status() { + // So far, when we hit an error it is always because something bad happened: we were under attack, and the data we got + // is not valid as a whole, even if all the individual blocks were fine. + + // That is why we really want to ban all peers that supported the original bitmap_output hash. + // The reason: so far it is the only way to fool the node. All other hashes are part of the headers. + + if let Some(output_bitmap_root_header_hash) = self.output_bitmap_root_header_hash { + // let's check for supporters and ban whoever committed to the same hash + warn!("Because of PIBD sync error, banning peers that were involved.
output_bitmap_root_header_hash={}", output_bitmap_root_header_hash); + for peer in self.peers.iter() { + if peer.commited_to_pibd_bitmap_output_root(&output_bitmap_root_header_hash) + { + if let Err(err) = self + .peers + .ban_peer(peer.info.addr.clone(), ReasonForBan::PibdFailure) + { + error!("Unable to ban the peer {}, error {}", &peer.info.addr, err); + } + } + } + self.output_bitmap_root_header_hash = None; + } + + let archive_header = self.chain.txhashset_archive_header_header_only().unwrap(); + error!("PIBD Reported Failure - Restarting Sync"); + // reset desegmenter state + self.chain.reset_desegmenter(); + self.reset_chain(); + self.sync_state .update_pibd_progress(false, false, 0, 1, &archive_header); + sync_need_restart = true; + } + } + // check peer connection status of this sync - if let Some(ref peer) = self.state_sync_peer { - if let SyncStatus::TxHashsetDownload { .. } = self.sync_state.status() { - if !peer.is_connected() { - sync_need_restart = true; - info!( "state_sync: peer connection lost: {:?}. restart", peer.info.addr, ); + if !using_pibd { + if let Some(ref peer) = self.state_sync_peer { + if let SyncStatus::TxHashsetDownload { .. } = self.sync_state.status() { + if !peer.is_connected() { + sync_need_restart = true; + info!( + "state_sync: peer connection lost: {:?}. restart", + peer.info.addr, + ); + } } } } @@ -115,46 +202,418 @@ // run fast sync if applicable, normally only run one-time, except restart in error if sync_need_restart || header_head.height == highest_height { - let (go, download_timeout) = self.state_sync_due(); + if using_pibd { + if sync_need_restart { + return true; + } + let (launch, _download_timeout) = self.state_sync_due(); + let archive_header = { self.chain.txhashset_archive_header_header_only().unwrap() }; + if launch { + self.sync_state .update_pibd_progress(false, false, 0, 1, &archive_header); + } + + let archive_header = self.chain.txhashset_archive_header_header_only().unwrap(); + + self.ban_inactive_pibd_peers(); + self.make_pibd_hand_shake(&archive_header); + + let mut has_segmenter = true; + if self.chain.get_desegmenter(&archive_header).read().is_none() { + has_segmenter = false; + if let Some(bitmap_output_root) = + self.select_pibd_bitmap_output_root(&archive_header) + { + self.output_bitmap_root_header_hash = + Some((bitmap_output_root, archive_header.hash()).hash()); + // Resetting the chain because PIBD cannot tolerate changes to the output bitmaps. + // Since we don't handle that (it is possible to handle by merging bitmaps), we + // had better reset the chain.
+ // Note: every 12 hours the root will change, so the PIBD process must finish before then + self.reset_chain(); + if let Err(e) = self .chain .create_desegmenter(&archive_header, bitmap_output_root) + { + error!( "Unable to create desegmenter for header at {}, Error: {}", archive_header.height, e ); + } else { + has_segmenter = true; + } + } + } + + if has_segmenter { + // Continue our PIBD process (which returns true if all segments are in) + match self.continue_pibd(&archive_header) { + Ok(true) => { + let desegmenter = self.chain.get_desegmenter(&archive_header); + // All segments in, validate + if let Some(d) = desegmenter.write().as_mut() { + if let Ok(true) = d.check_progress(self.sync_state.clone()) { + if let Err(e) = d.check_update_leaf_set_state() { + error!("error updating PIBD leaf set: {}", e); + self.sync_state.update_pibd_progress( + false, + true, + 0, + 1, + &archive_header, + ); + return false; + } + if let Err(e) = d.validate_complete_state( + self.sync_state.clone(), + stop_state.clone(), + ) { + error!("error validating PIBD state: {}", e); + self.sync_state.update_pibd_progress( + false, + true, + 0, + 1, + &archive_header, + ); + return false; + } + return true; + } + }; + } + Ok(false) => (), // nothing to do, continue + Err(e) => { + // need to restart the sync process, but not ban the peers, it is not their fault + error!("Need to restart the PIBD resync because of the error {}", e); + // resetting to none, so no peers will be banned + self.output_bitmap_root_header_hash = None; + self.sync_state.update_pibd_progress( + false, + true, + 0, + 1, + &archive_header, + ); + return false; + } + } + } + } else { + let (go, download_timeout) = self.state_sync_due(); + + if let SyncStatus::TxHashsetDownload { .. } = self.sync_state.status() { + if download_timeout { + error!("state_sync: TxHashsetDownload status timeout in 10 minutes!"); + self.sync_state + .set_sync_error(chain::Error::SyncError(format!( + "{:?}", + p2p::Error::Timeout + ))); + } + } + + if go { + self.state_sync_peer = None; + match self.request_state(&header_head) { + Ok(peer) => { + self.state_sync_peer = Some(peer); + } + Err(e) => self + .sync_state + .set_sync_error(chain::Error::SyncError(format!("{:?}", e))), + } + + self.sync_state + .update(SyncStatus::TxHashsetDownload(Default::default())); + } + } + } + true + } + + fn get_pibd_qualify_peers(&self, archive_header: &BlockHeader) -> Vec<Arc<Peer>> { + // First, get max difficulty or greater peers + self.peers + .iter() + .connected() + .into_iter() + .filter(|peer| { peer.info.height() > archive_header.height && peer.info.capabilities.contains(Capabilities::PIBD_HIST) }) + .collect() + } + + fn get_pibd_ready_peers(&self) -> Vec<Arc<Peer>> { + if let Some(output_bitmap_root_header_hash) = self.output_bitmap_root_header_hash.as_ref() { + // First, get max difficulty or greater peers + self.peers + .iter() + .connected() + .into_iter() + .filter(|peer| { let pibd_status = peer.pibd_status.lock(); match &pibd_status.output_bitmap_root { Some(output_bitmap_root) => { let peer_output_bitmap_root_header_hash = (output_bitmap_root, pibd_status.header_hash).hash(); output_bitmap_root_header_hash == &peer_output_bitmap_root_header_hash && pibd_status.no_response_requests <= pibd_params::STALE_REQUESTS_PER_PEER } None => false, } }) + .collect() + } else { + vec![] + } + } + + fn ban_inactive_pibd_peers(&self) { + let none_active_time_limit = + Utc::now().timestamp() - pibd_params::SEGMENT_REQUEST_TIMEOUT_SECS; + let mut banned_peers:
Vec<Arc<Peer>> = Vec::new(); + for peer in self.peers.iter().connected().into_iter() { + if let Some((requests, time)) = peer.get_pibd_no_response_state() { + // we can ban this peer if we haven't heard any correct responses back from it for a long time + if time < none_active_time_limit && requests > pibd_params::STALE_REQUESTS_PER_PEER + { + banned_peers.push(peer.clone()); + } + } + } + for peer in banned_peers { + if let Err(err) = self + .peers + .ban_peer(peer.info.addr.clone(), ReasonForBan::PibdInactive) + { + error!("Unable to ban the peer {}, error {}", &peer.info.addr, err); + } + } + } + + fn make_pibd_hand_shake(&self, archive_header: &BlockHeader) { + let peers = self.get_pibd_qualify_peers(archive_header); + + // Minimal interval between requests to start the PIBD sync process + + let last_handshake_time = + Utc::now().timestamp() - pibd_params::SEGMENT_REQUEST_TIMEOUT_SECS; + + for peer in peers { + let mut need_sync = false; + { + // we don't want to hold the lock for long, which is why we use need_sync and make the api call later + let mut pibd_status = peer.pibd_status.lock(); + if (pibd_status.header_height < archive_header.height + || pibd_status.output_bitmap_root.is_none()) + && pibd_status.initiate_pibd_request_time < last_handshake_time + { + pibd_status.initiate_pibd_request_time = last_handshake_time; + need_sync = true; + } + } - if need_sync { + if let Err(e) = + peer.send_start_pibd_sync_request(archive_header.height, archive_header.hash()) + { + warn!( + "Error sending start_pibd_sync_request to peer at {}, reason: {:?}", + peer.info.addr, e + ); + } else { + info!( + "Sending handshake start_pibd_sync_request to peer at {}", + peer.info.addr ); + } + } + } + } + + // Select a random peer and take its hash. + // An alternative approach is to select the largest group, but I think that is less attack resistant. + // The download process takes time, so even if we ban the whole group afterwards, a majority would still be able to control + // the sync process. Choosing at random gives even a single 'good' peer a chance.
+ fn select_pibd_bitmap_output_root(&self, archive_header: &BlockHeader) -> Option<Hash> { + let header_hash = archive_header.hash(); + + let handshake_time_limit = + Utc::now().timestamp() - pibd_params::SEGMENT_REQUEST_TIMEOUT_SECS / 2; + + let mut min_handshake_time = handshake_time_limit + 1; + + let mut rng = rand::thread_rng(); + + let output_bitmap_roots: Vec<Hash> = self + .peers + .iter() + .into_iter() + .filter_map(|peer| { let pibd_status = peer.pibd_status.lock(); if pibd_status.header_height == archive_header.height + && pibd_status.header_hash == header_hash + && pibd_status.output_bitmap_root.is_some() + { + min_handshake_time = + std::cmp::min(min_handshake_time, pibd_status.initiate_pibd_request_time); + Some(pibd_status.output_bitmap_root.unwrap()) + } else { + None + } }) + .collect(); + + if output_bitmap_roots.is_empty() + || (min_handshake_time >= handshake_time_limit && output_bitmap_roots.len() < 3) + { + return None; + } + + info!( + "selecting pibd bitmap_output_root from {:?}", + output_bitmap_roots + ); + return Some(output_bitmap_roots[rng.gen_range(0, output_bitmap_roots.len())]); + } + + /// Continue the PIBD process, returning true if the desegmenter is reporting + /// that the process is done + fn continue_pibd(&mut self, archive_header: &BlockHeader) -> Result<bool, chain::Error> { + // Check the state of our chain to figure out what we should be requesting next + let desegmenter = self.chain.get_desegmenter(&archive_header); + + // Remove stale requests; if we haven't received the segment within a minute, re-request it + // TODO: verify timing + self.sync_state + .remove_stale_pibd_requests(pibd_params::SEGMENT_REQUEST_TIMEOUT_SECS); + + // Apply segments... TODO: figure out how this should be called, might + // need to be a separate thread. + if let Some(mut de) = desegmenter.try_write() { + if let Some(d) = de.as_mut() { + let res = d.apply_next_segments(); + if let Err(e) = res { + error!("error applying segment: {}", e); + self.sync_state + .update_pibd_progress(false, true, 0, 1, &archive_header); + return Ok(false); + } + } + } + + let pibd_peers = self.get_pibd_ready_peers(); + + // Choose a random "most work" peer, preferring outbound if at all possible. + let mut outbound_peers: Vec<Arc<Peer>> = Vec::new(); + let mut inbound_peers: Vec<Arc<Peer>> = Vec::new(); + let mut rng = rand::thread_rng(); + + for p in pibd_peers { + if p.info.is_outbound() { + outbound_peers.push(p); + } else if p.info.is_inbound() { + inbound_peers.push(p); + } + } + + let peer_num = if outbound_peers.len() > 0 { + outbound_peers.len() + } else { + inbound_peers.len() + }; + + let desired_segments_num = std::cmp::min( + pibd_params::SEGMENT_REQUEST_LIMIT, + pibd_params::SEGMENT_REQUEST_PER_PEER * peer_num, + ); + + let mut next_segment_ids = vec![]; + if let Some(d) = desegmenter.write().as_mut() { + if let Ok(true) = d.check_progress(self.sync_state.clone()) { + return Ok(true); + } + // Figure out the next segments we need + // (12 is divisible by 3, to try and evenly spread the requests among the 3 + // main pmmrs.
Bitmap segments will always be requested first) + next_segment_ids = d.next_desired_segments(std::cmp::max(1, desired_segments_num))?; + } + + // For each segment, pick a desirable peer and send a message + // (Provided we're not waiting for a response for this message from someone else) + for seg_id in next_segment_ids.iter() { + if self.sync_state.contains_pibd_segment(seg_id) { + trace!("Request list contains, continuing: {:?}", seg_id); + continue; + } + + let peer = outbound_peers + .choose(&mut rng) + .or_else(|| inbound_peers.choose(&mut rng)); + debug!( + "Has {} PIBD ready peers, Chosen peer is {:?}", + peer_num, peer + ); - if go { - self.state_sync_peer = None; - match self.request_state(&header_head) { - Ok(peer) => { - self.state_sync_peer = Some(peer); + match peer { + None => { + // If there are no suitable PIBD-enabled peers, AND there hasn't been one for a minute, + // abort PIBD and fall back to txhashset download. + // Waiting a minute helps ensure that the cancellation isn't simply due to a single non-PIBD enabled + // peer having the max difficulty + if let None = self.earliest_zero_pibd_peer_time { + self.set_earliest_zero_pibd_peer_time(Some(Utc::now())); } - Err(e) => self - .sync_state - .set_sync_error(chain::ErrorKind::SyncError(format!("{:?}", e)).into()), - } - - // to avoid the confusing log, - // update the final HeaderSync state mainly for 'current_height' - self.sync_state.update_if( - SyncStatus::HeaderSync { - current_height: header_head.height, - highest_height, - }, - |s| match s { - SyncStatus::HeaderSync { .. } => true, - _ => false, - }, - ); + if self.earliest_zero_pibd_peer_time.unwrap() + + Duration::seconds(pibd_params::TXHASHSET_ZIP_FALLBACK_TIME_SECS) + < Utc::now() + { + info!("No PIBD-enabled max-difficulty peers for the past {} seconds - Aborting PIBD and falling back to TxHashset.zip download", pibd_params::TXHASHSET_ZIP_FALLBACK_TIME_SECS); + self.sync_state + .update_pibd_progress(true, true, 0, 1, &archive_header); + self.sync_state + .set_sync_error(chain::Error::AbortingPIBDError); + self.set_pibd_aborted(); + return Ok(false); + } + } + Some(p) => { + self.set_earliest_zero_pibd_peer_time(None); - self.sync_state - .update(SyncStatus::TxHashsetDownload(Default::default())); + self.sync_state.add_pibd_segment(seg_id); + let res = match seg_id.segment_type { + SegmentType::Bitmap => p.send_bitmap_segment_request( + archive_header.hash(), + seg_id.identifier.clone(), + ), + SegmentType::Output => p.send_output_segment_request( + archive_header.hash(), + seg_id.identifier.clone(), + ), + SegmentType::RangeProof => p.send_rangeproof_segment_request( + archive_header.hash(), + seg_id.identifier.clone(), + ), + SegmentType::Kernel => p.send_kernel_segment_request( + archive_header.hash(), + seg_id.identifier.clone(), + ), + }; + if let Err(e) = res { + info!( + "Error sending request to peer at {}, reason: {:?}", + p.info.addr, e + ); + self.sync_state.remove_pibd_segment(seg_id); + } + } } } - true + Ok(false) } fn request_state(&self, header_head: &chain::Tip) -> Result, p2p::Error> { @@ -163,7 +622,24 @@ impl StateSync { let mut txhashset_height = header_head.height.saturating_sub(threshold); txhashset_height = txhashset_height.saturating_sub(txhashset_height % archive_interval); - if let Some(peer) = self.peers.most_work_peer() { + let peers_iter = || { + self.peers + .iter() + .with_capabilities(Capabilities::TXHASHSET_HIST) + .connected() + }; + + // Filter peers further based on max difficulty.
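The max-difficulty filter announced above (and reused in needs_syncing further down) is a two-pass pattern; a minimal standalone sketch, with u64 standing in for the chain's Difficulty type:

// First pass: find the best difficulty any candidate advertises.
// Second pass: keep only peers at (or above) that difficulty, so the
// random choice is made among "most work" peers only.
fn most_work_candidates(peer_diffs: &[u64]) -> Vec<usize> {
    let max_diff = peer_diffs.iter().copied().max().unwrap_or(0);
    peer_diffs
        .iter()
        .enumerate()
        .filter(|(_, d)| **d >= max_diff)
        .map(|(i, _)| i)
        .collect()
}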
+ let max_diff = peers_iter().max_difficulty().unwrap_or(Difficulty::zero()); + let peers_iter = || peers_iter().with_difficulty(|x| x >= max_diff); + + // Choose a random "most work" peer, preferring outbound if at all possible. + let peer = peers_iter().outbound().choose_random().or_else(|| { + warn!("no suitable outbound peer for state sync, considering inbound"); + peers_iter().inbound().choose_random() + }); + + if let Some(peer) = peer { // ask for txhashset at state_sync_threshold let mut txhashset_head = self .chain diff --git a/servers/src/grin/sync/syncer.rs b/servers/src/grin/sync/syncer.rs index c84180a7a0..670a800512 100644 --- a/servers/src/grin/sync/syncer.rs +++ b/servers/src/grin/sync/syncer.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -79,7 +79,15 @@ impl SyncRunner { if self.stop_state.is_stopped() { break; } - let wp = self.peers.more_or_same_work_peers()?; + // Count peers with at least our difficulty. + let wp = self + .peers + .iter() + .outbound() + .with_difficulty(|x| x >= head.total_difficulty) + .connected() + .count(); + // exit loop when: // * we have more than MIN_PEERS more_or_same_work peers // * we are synced already, e.g. grin was quickly restarted @@ -185,21 +193,31 @@ impl SyncRunner { let tail = self.chain.tail().unwrap_or_else(|_| head.clone()); let header_head = unwrap_or_restart_loop!(self.chain.header_head()); + // "sync_head" allows us to sync against a large fork on the header chain; + // we track this during an extended header sync + let sync_status = self.sync_state.status(); + + let sync_head = match sync_status { + SyncStatus::HeaderSync { sync_head, .. } => sync_head, + _ => header_head, + }; + // run each sync stage, each of them deciding whether they're needed // except for state sync, which only runs if body sync returns true (meaning the txhashset is needed) - unwrap_or_restart_loop!(header_sync.check_run(&header_head, highest_height)); + unwrap_or_restart_loop!(header_sync.check_run(sync_head)); let mut check_state_sync = false; match self.sync_state.status() { - SyncStatus::TxHashsetDownload { .. } - | SyncStatus::TxHashsetSetup + SyncStatus::TxHashsetPibd { .. } + | SyncStatus::TxHashsetDownload { .. } + | SyncStatus::TxHashsetSetup { .. } | SyncStatus::TxHashsetRangeProofsValidation { .. } | SyncStatus::TxHashsetKernelsValidation { .. } | SyncStatus::TxHashsetSave | SyncStatus::TxHashsetDone => check_state_sync = true, _ => { // skip body sync if header chain is not synced. - if header_head.height < highest_height { + if sync_head.height < highest_height { continue; } @@ -212,7 +230,13 @@ impl SyncRunner { } if check_state_sync { - state_sync.check_run(&header_head, &head, &tail, highest_height); + state_sync.check_run( + &header_head, + &head, + &tail, + highest_height, + self.stop_state.clone(), + ); } } } @@ -222,7 +246,23 @@ impl SyncRunner { fn needs_syncing(&self) -> Result<(bool, u64), chain::Error> { let local_diff = self.chain.head()?.total_difficulty; let mut is_syncing = self.sync_state.is_syncing(); - let peer = self.peers.most_work_peer(); + + // Find a peer with the greatest known difficulty. + // Consider all peers, both inbound and outbound. + // We prioritize syncing against outbound peers whenever possible, + // but we do support syncing against an inbound peer if it has greater work than any outbound peer.
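A compact sketch of the preference rule described in the comment above, under the assumption (hypothetical types, not the p2p API) that each candidate carries only its direction and advertised work:

#[derive(Clone, Copy)]
struct PeerView {
    outbound: bool,
    diff: u64,
}

// Prefer the best outbound peer, but fall back to an inbound peer when
// it genuinely knows strictly more work than every outbound peer.
fn sync_target(peers: &[PeerView]) -> Option<PeerView> {
    let best_out = peers.iter().filter(|p| p.outbound).max_by_key(|p| p.diff);
    let best_in = peers.iter().filter(|p| !p.outbound).max_by_key(|p| p.diff);
    match (best_out, best_in) {
        (Some(o), Some(i)) if i.diff > o.diff => Some(*i),
        (Some(o), _) => Some(*o),
        (None, i) => i.copied(),
    }
}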
+ let max_diff = self + .peers + .iter() + .connected() + .max_difficulty() + .unwrap_or(Difficulty::zero()); + let peer = self + .peers + .iter() + .with_difficulty(|x| x >= max_diff) + .connected() + .choose_random(); let peer_info = if let Some(p) = peer { p.info.clone() diff --git a/servers/src/lib.rs b/servers/src/lib.rs index eb4cc30887..18387968f2 100644 --- a/servers/src/lib.rs +++ b/servers/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -36,7 +36,7 @@ use grin_store as store; use grin_util as util; mod error; -pub use crate::error::{Error, ErrorKind}; +pub use crate::error::Error; pub mod common; mod grin; @@ -46,4 +46,4 @@ mod tor; pub use crate::common::stats::{DiffBlock, PeerStats, ServerStats, StratumStats, WorkerStats}; pub use crate::common::types::{ServerConfig, StratumServerConfig}; pub use crate::core::global::{FLOONET_DNS_SEEDS, MAINNET_DNS_SEEDS}; -pub use crate::grin::server::{Server, ServerTxPool, ServerVerifierCache}; +pub use crate::grin::server::{Server, ServerTxPool}; diff --git a/servers/src/mining.rs b/servers/src/mining.rs index 93d17d046c..ce89cce396 100644 --- a/servers/src/mining.rs +++ b/servers/src/mining.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/servers/src/mining/mine_block.rs b/servers/src/mining/mine_block.rs index 2c2758d909..42bb155545 100644 --- a/servers/src/mining/mine_block.rs +++ b/servers/src/mining/mine_block.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ //! Build a block to mine: gathers transactions from the pool, assembles //! them into a block and returns it. -use chrono::prelude::{DateTime, NaiveDateTime, Utc}; +use chrono::prelude::{DateTime, Utc}; use rand::{thread_rng, Rng}; use serde_json::{json, Value}; use std::sync::Arc; @@ -30,7 +30,7 @@ use crate::core::libtx::secp_ser; use crate::core::libtx::ProofBuilder; use crate::core::{consensus, core, global}; use crate::keychain::{ExtKeychain, Identifier, Keychain}; -use crate::{ServerTxPool, ServerVerifierCache}; +use crate::ServerTxPool; /// Fees in block to use for coinbase amount calculation /// (Duplicated from Grin wallet project) @@ -70,24 +70,17 @@ pub struct CbData { pub fn get_block( chain: &Arc, tx_pool: &ServerTxPool, - verifier_cache: ServerVerifierCache, key_id: Option, wallet_listener_url: Option, ) -> (core::Block, BlockFees) { let wallet_retry_interval = 5; // get the latest chain state and build a block on top of it - let mut result = build_block( - chain, - tx_pool, - verifier_cache.clone(), - key_id.clone(), - wallet_listener_url.clone(), - ); + let mut result = build_block(chain, tx_pool, key_id.clone(), wallet_listener_url.clone()); while let Err(e) = result { let mut new_key_id = key_id.to_owned(); match e { - self::Error::Chain(c) => match c.kind() { - chain::ErrorKind::DuplicateCommitment(_) => { + self::Error::Chain(c) => match c { + chain::Error::DuplicateCommitment(_) => { debug!( "Duplicate commit for potential coinbase detected. Trying next derivation." 
); @@ -116,13 +109,7 @@ pub fn get_block( thread::sleep(Duration::from_millis(100)); } - result = build_block( - chain, - tx_pool, - verifier_cache.clone(), - new_key_id, - wallet_listener_url.clone(), - ); + result = build_block(chain, tx_pool, new_key_id, wallet_listener_url.clone()); } return result.unwrap(); } @@ -132,7 +119,6 @@ pub fn get_block( fn build_block( chain: &Arc, tx_pool: &ServerTxPool, - verifier_cache: ServerVerifierCache, key_id: Option, wallet_listener_url: Option, ) -> Result<(core::Block, BlockFees), Error> { @@ -166,7 +152,7 @@ fn build_block( }; // build the coinbase and the block itself - let fees = txs.iter().map(|tx| tx.fee()).sum(); + let fees = txs.iter().map(|tx| tx.fee(head.height)).sum(); let height = head.height + 1; let block_fees = BlockFees { fees, @@ -178,11 +164,15 @@ let mut b = core::Block::from_reward(&head, &txs, output, kernel, difficulty.difficulty)?; // making sure we're not spending time mining a useless block - b.validate(&head.total_kernel_offset, verifier_cache)?; + b.validate(&head.total_kernel_offset)?; b.header.pow.nonce = thread_rng().gen(); b.header.pow.secondary_scaling = difficulty.secondary_scaling; - b.header.timestamp = DateTime::::from_utc(NaiveDateTime::from_timestamp(now_sec, 0), Utc); + let ts = DateTime::from_timestamp(now_sec, 0); + if ts.is_none() { + return Err(Error::General("Utc::now into timestamp".into())); + } + b.header.timestamp = ts.unwrap().to_utc(); debug!( "Built new block with {} inputs and {} outputs, block difficulty: {}, cumulative difficulty {}", @@ -196,24 +186,21 @@ match chain.set_txhashset_roots(&mut b) { Ok(_) => Ok((b, block_fees)), Err(e) => { - match e.kind() { + match e { // If this is a duplicate commitment then likely trying to use // a key that has already been derived but not in the wallet // for some reason, allow caller to retry. - chain::ErrorKind::DuplicateCommitment(e) => Err(Error::Chain( - chain::ErrorKind::DuplicateCommitment(e).into(), - )), + chain::Error::DuplicateCommitment(e) => { + Err(Error::Chain(chain::Error::DuplicateCommitment(e))) + } // Some other issue, possibly duplicate kernel _ => { error!("Error setting txhashset root to build a block: {:?}", e); - Err(Error::Chain( - chain::ErrorKind::Other(format!( - "Error setting txhashset root to build a block: {:?}", - e - )) - .into(), - )) + Err(Error::Chain(chain::Error::Other(format!( - "Error setting txhashset root to build a block: {:?}", + "Error setting txhashset root to build a block: {:?}", + e + )))) } } } @@ -279,9 +266,7 @@ fn create_coinbase(dest: &str, block_fees: &BlockFees) -> Result }); trace!("Sending build_coinbase request: {}", req_body); - let req = api::client::create_post_request(url.as_str(), None, &req_body)?; - let timeout = api::client::TimeOut::default(); let res: String = api::client::send_request(req, timeout).map_err(|e| { let report = format!( diff --git a/servers/src/mining/stratum_data.rs b/servers/src/mining/stratum_data.rs index 7a9c3fffcd..0550408455 100644 --- a/servers/src/mining/stratum_data.rs +++ b/servers/src/mining/stratum_data.rs @@ -18,6 +18,7 @@ // Worker Object - a connected stratum client - a miner, pool, proxy, etc... use crate::common::stats::{StratumStats, WorkerStats}; +use crate::core::consensus::graph_weight; use crate::util::RwLock; use chrono::prelude::Utc; use futures::channel::mpsc; @@ -159,7 +160,11 @@ impl WorkersList { // Or somebody may just want to attack the mining pool.
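The update_network_hashrate helper added in the next hunk estimates network throughput in graphs per second as 42 * (difficulty / graph_weight) / 60, where 60 is the one-minute target block time and 42 presumably reflects the 42-cycle Cuckoo proof. A worked sketch with made-up numbers (the real graph_weight comes from consensus code):

// graphs/s = 42 * (network_difficulty / graph_weight) / 60.
// e.g. with difficulty 1_000_000 and graph_weight 4_096:
// 42.0 * (1_000_000.0 / 4_096.0) / 60.0 ≈ 170.9 graphs/s.
fn estimate_network_hashrate(network_difficulty: u64, graph_weight: u64) -> f64 {
    42.0 * (network_difficulty as f64 / graph_weight as f64) / 60.0
}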
// let worker_id = stratum_stats.worker_stats.len(); - let worker_id = self.stratum_stats.allocate_new_worker(); + let worker_id = self.stratum_stats.allocate_new_worker( + self.stratum_stats + .minimum_share_difficulty + .load(Ordering::Relaxed), + ); let worker = Worker::new(worker_id, ip, tx, kill_switch); let num_workers = self.workers_map.add(&worker_id, worker); @@ -253,4 +258,32 @@ impl WorkersList { .network_difficulty .store(difficulty, Ordering::Relaxed); } + + pub fn update_network_hashrate(&self) { + let network_hashrate = 42.0 + * (self + .stratum_stats + .network_difficulty + .load(Ordering::Relaxed) as f64 + / graph_weight( + self.stratum_stats.block_height.load(Ordering::Relaxed), + self.stratum_stats.edge_bits.load(Ordering::Relaxed) as u8, + ) as f64) / 60.0; + + self.stratum_stats + .network_hashrate + .store(network_hashrate, Ordering::Relaxed); + } + + pub fn update_edge_bits(&self, edge_bits: u16) { + self.stratum_stats + .edge_bits + .store(edge_bits, Ordering::Relaxed); + } + + pub fn increment_block_found(&self) { + self.stratum_stats + .blocks_found + .fetch_add(1, Ordering::Relaxed); + } } diff --git a/servers/src/mining/stratumserver.rs b/servers/src/mining/stratumserver.rs index 33e9b4dfd2..b1f8872e16 100644 --- a/servers/src/mining/stratumserver.rs +++ b/servers/src/mining/stratumserver.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,8 +23,6 @@ use tokio_util::codec::{Framed, LinesCodec}; use crate::util::RwLock; use chrono::prelude::Utc; -use serde; -use serde_json; use serde_json::Value; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::atomic::{AtomicI32, Ordering}; @@ -38,13 +36,14 @@ use crate::common::stats::StratumStats; use crate::common::types::StratumServerConfig; use crate::core::core::hash::Hashed; use crate::core::core::Block; +use crate::core::global; use crate::core::stratum::connections; use crate::core::{pow, ser}; use crate::keychain; use crate::mining::mine_block; use crate::util; use crate::util::ToHex; -use crate::{ServerTxPool, ServerVerifierCache}; +use crate::ServerTxPool; use std::cmp::min; // ---------------------------------------- @@ -185,8 +184,8 @@ struct State { // nothing has changed. We only want to create a key_id for each new block, // and reuse it when we rebuild the current block to add new tx. 
current_key_id: Option, - current_difficulty: u64, - minimum_share_difficulty: u64, + current_difficulty: u64, // scaled + minimum_share_difficulty: u64, // unscaled } impl State { @@ -420,7 +419,8 @@ impl Handler { return Err(RpcError::too_late()); } - let share_difficulty: u64; + let scaled_share_difficulty: u64; + let unscaled_share_difficulty: u64; let mut share_is_block = false; let mut b: Block = b.unwrap().clone(); @@ -440,18 +440,17 @@ impl Handler { return Err(RpcError::cannot_validate()); } - // Get share difficulty - share_difficulty = b.header.pow.to_difficulty(b.header.height).to_num(); + // Get share difficulty values + scaled_share_difficulty = b.header.pow.to_difficulty(b.header.height).to_num(); + unscaled_share_difficulty = b.header.pow.to_unscaled_difficulty().to_num(); + // Note: state.minimum_share_difficulty is unscaled + // state.current_difficulty is scaled // If the difficulty is too low it's an error - if (b.header.pow.is_primary() && share_difficulty < minimum_share_difficulty * 7_936) - || b.header.pow.is_secondary() - && share_difficulty - < minimum_share_difficulty * b.header.pow.secondary_scaling as u64 - { + if unscaled_share_difficulty < minimum_share_difficulty { // Return error status error!( "(Server ID: {}) Share at height {}, hash {}, edge_bits {}, nonce {}, job_id {} rejected due to low difficulty: {}/{}", - self.id, params.height, b.hash(), params.edge_bits, params.nonce, params.job_id, share_difficulty, minimum_share_difficulty, + self.id, params.height, b.hash(), params.edge_bits, params.nonce, params.job_id, unscaled_share_difficulty, minimum_share_difficulty, ); self.workers .update_stats(worker_id, |worker_stats| worker_stats.num_rejected += 1); @@ -459,13 +458,13 @@ impl Handler { } // If the difficulty is high enough, submit it (which also validates it) - if share_difficulty >= current_difficulty { + if scaled_share_difficulty >= current_difficulty { // This is a full solution, submit it to the network let res = self.chain.process_block(b.clone(), chain::Options::MINE); if let Err(e) = res { // Return error status error!( - "(Server ID: {}) Failed to validate solution at height {}, hash {}, edge_bits {}, nonce {}, job_id {}, {}: {}", + "(Server ID: {}) Failed to validate solution at height {}, hash {}, edge_bits {}, nonce {}, job_id {}, {}", self.id, params.height, b.hash(), params.edge_bits, params.nonce, params.job_id, e, - e.backtrace().unwrap(), ); self.workers .update_stats(worker_id, |worker_stats| worker_stats.num_rejected += 1); @@ -482,6 +480,7 @@ share_is_block = true; self.workers .update_stats(worker_id, |worker_stats| worker_stats.num_blocks_found += 1); + self.workers.increment_block_found(); // Log message to make it obvious we found a block let stats = self .workers @@ -516,6 +515,7 @@ } } // Log this as a valid share + self.workers.update_edge_bits(params.edge_bits as u16); if let Some(worker) = self.workers.get_worker(&worker_id) { let submitted_by = match worker.login { None => worker.id.to_string(), @@ -530,7 +530,7 @@ b.header.pow.proof.edge_bits, b.header.pow.nonce, params.job_id, - share_difficulty, + scaled_share_difficulty, current_difficulty, submitted_by, ); @@ -571,12 +571,7 @@ self.workers.broadcast(job_request_json); } - pub fn run( - &self, - config: &StratumServerConfig, - tx_pool: &ServerTxPool, - verifier_cache: ServerVerifierCache, - ) { + pub fn run(&self, config: &StratumServerConfig, tx_pool: &ServerTxPool) { debug!("Run main
loop"); let mut deadline: i64 = 0; let mut head = self.chain.head().unwrap(); @@ -597,10 +592,9 @@ impl Handler { head = self.chain.head().unwrap(); let latest_hash = head.last_block_h; - // Build a new block if: - // There is a new block on the chain - // or We are rebuilding the current one to include new transactions - // and there is at least one worker connected + // Build a new block if there is at least one worker and + // there is a new block on the chain or it's time to rebuild + // the current one to include new transactions if (current_hash != latest_hash || Utc::now().timestamp() >= deadline) && self.workers.count() > 0 { let wallet_listener_url = if !config.burn_reward { Some(config.wallet_listener_url.clone()) } else { None }; - // If this is a new block, clear the current_block version history + // If this is a new block we will clear the current_block version history let clear_blocks = current_hash != latest_hash; // Build the new block (version) let (new_block, block_fees) = mine_block::get_block( &self.chain, tx_pool, - verifier_cache.clone(), self.current_state.read().current_key_id.clone(), wallet_listener_url, ); @@ -626,13 +619,14 @@ { let mut state = self.current_state.write(); + // scaled difficulty state.current_difficulty = (new_block.header.total_difficulty() - head.total_difficulty).to_num(); state.current_key_id = block_fees.key_id(); current_hash = latest_hash; - // set the minimum acceptable share difficulty for this block + // set the minimum acceptable share unscaled difficulty for this block state.minimum_share_difficulty = cmp::min(config.minimum_share_difficulty, state.current_difficulty); } @@ -640,20 +634,24 @@ // set a new deadline for rebuilding with fresh transactions deadline = Utc::now().timestamp() + config.attempt_time_per_block as i64; + // Update the mining stats self.workers.update_block_height(new_block.header.height); - self.workers - .update_network_difficulty(self.current_state.read().current_difficulty); + let difficulty = new_block.header.total_difficulty() - head.total_difficulty; + self.workers.update_network_difficulty(difficulty.to_num()); + self.workers.update_network_hashrate(); { + let mut state = self.current_state.write(); + // If this is a new block we will clear the current_block version history if clear_blocks { state.current_block_versions.clear(); } + // Add this new block candidate onto our list of block versions for this height state.current_block_versions.push(new_block); } - // Send this job to all connected workers } + // Send this job to all connected workers self.broadcast_job(); } @@ -891,7 +889,6 @@ pub struct StratumServer { config: StratumServerConfig, chain: Arc, pub tx_pool: ServerTxPool, - verifier_cache: ServerVerifierCache, sync_state: Arc, stratum_stats: Arc, ip_pool: Arc, @@ -904,7 +901,6 @@ impl StratumServer { config: StratumServerConfig, chain: Arc, tx_pool: ServerTxPool, - verifier_cache: ServerVerifierCache, stratum_stats: Arc, ip_pool: Arc, ) -> StratumServer { @@ -913,7 +909,6 @@ config, chain, tx_pool, - verifier_cache, sync_state: Arc::new(SyncState::new()), stratum_stats: stratum_stats, ip_pool, @@ -926,10 +921,10 @@ /// existing chain anytime required and sending that to the connected /// stratum miner, proxy, or pool, and accepts full solutions to /// be submitted.
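The job-rebuild condition in the Handler::run loop above reduces to a single predicate; a minimal sketch with hypothetical arguments:

// Rebuild when the chain tip changed or the per-block attempt deadline
// passed, but only while at least one worker is connected. After each
// rebuild, the caller resets deadline = now + attempt_time_per_block.
fn should_rebuild(current_hash: u64, latest_hash: u64, now: i64, deadline: i64, workers: usize) -> bool {
    (current_hash != latest_hash || now >= deadline) && workers > 0
}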
- pub fn run_loop(&mut self, edge_bits: u32, proof_size: usize, sync_state: Arc) { + pub fn run_loop(&mut self, proof_size: usize, sync_state: Arc) { info!( - "(Server ID: {}) Starting stratum server with edge_bits = {}, proof_size = {}, config: {:?}", - self.id, edge_bits, proof_size, self.config + "(Server ID: {}) Starting stratum server with proof_size = {}", + self.id, proof_size ); self.sync_state = sync_state; @@ -953,7 +948,10 @@ impl StratumServer { self.stratum_stats.is_running.store(true, Ordering::Relaxed); self.stratum_stats .edge_bits - .store(edge_bits as u16, Ordering::Relaxed); + .store(global::min_edge_bits() as u16 + 1, Ordering::Relaxed); + self.stratum_stats + .minimum_share_difficulty + .store(self.config.minimum_share_difficulty, Ordering::Relaxed); warn!( "Stratum server started on {}", @@ -965,7 +963,7 @@ impl StratumServer { thread::sleep(Duration::from_millis(50)); } - handler.run(&self.config, &self.tx_pool, self.verifier_cache.clone()); + handler.run(&self.config, &self.tx_pool); } // fn run_loop() } // StratumServer diff --git a/servers/src/mining/test_miner.rs b/servers/src/mining/test_miner.rs index a1b12126d2..2f55521439 100644 --- a/servers/src/mining/test_miner.rs +++ b/servers/src/mining/test_miner.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ use crate::core::core::{Block, BlockHeader}; use crate::core::global; use crate::mining::mine_block; use crate::util::StopState; -use crate::{ServerTxPool, ServerVerifierCache}; +use crate::ServerTxPool; use grin_chain::SyncState; use std::thread; use std::time::Duration; @@ -36,7 +36,6 @@ pub struct Miner { config: StratumServerConfig, chain: Arc, tx_pool: ServerTxPool, - verifier_cache: ServerVerifierCache, stop_state: Arc, sync_state: Arc, // Just to hold the port we're on, so this miner can be identified @@ -51,7 +50,6 @@ impl Miner { config: StratumServerConfig, chain: Arc, tx_pool: ServerTxPool, - verifier_cache: ServerVerifierCache, stop_state: Arc, sync_state: Arc, ) -> Miner { @@ -59,7 +57,6 @@ impl Miner { config, chain, tx_pool, - verifier_cache, debug_output_id: String::from("none"), stop_state, sync_state, @@ -156,7 +153,6 @@ impl Miner { let (mut b, block_fees) = mine_block::get_block( &self.chain, &self.tx_pool, - self.verifier_cache.clone(), key_id.clone(), wallet_listener_url.clone(), ); diff --git a/servers/src/tor/config.rs b/servers/src/tor/config.rs index 15c7431066..099b50f80b 100644 --- a/servers/src/tor/config.rs +++ b/servers/src/tor/config.rs @@ -14,7 +14,7 @@ //! 
Tor Configuration + Onion (Hidden) Service operations use crate::util::secp::key::SecretKey; -use crate::{Error, ErrorKind}; +use crate::Error; use grin_util::OnionV3Address; use ed25519_dalek::PublicKey as DalekPublicKey; @@ -26,8 +26,8 @@ use std::fs::{self, File}; use std::io::{Read, Write}; use std::path::{Path, MAIN_SEPARATOR}; -use failure::ResultExt; use grin_core::global; +use grin_util::secp::Secp256k1; pub const SEC_KEY_FILE_COPY: &str = "secret_key"; const SEC_KEY_FILE: &str = "hs_ed25519_secret_key"; @@ -42,7 +42,7 @@ const HIDDEN_SERVICES_DIR: &str = "onion_service_addresses"; fn set_permissions(file_path: &str) -> Result<(), Error> { use std::os::unix::prelude::*; fs::set_permissions(file_path, fs::Permissions::from_mode(0o700)).map_err(|e| { - ErrorKind::IO(format!( + Error::IO(format!( "Unable to update permissions for {}, {}", file_path, e )) @@ -88,19 +88,19 @@ impl TorRcConfig { /// write to file pub fn write_to_file(&self, file_path: &str) -> Result<(), Error> { let mut file = File::create(file_path) - .map_err(|e| ErrorKind::IO(format!("Unable to create file {}, {}", file_path, e)))?; + .map_err(|e| Error::IO(format!("Unable to create file {}, {}", file_path, e)))?; for item in &self.items { file.write_all(item.name.as_bytes()).map_err(|e| { - ErrorKind::IO(format!("Unable to write into file {}, {}", file_path, e)) + Error::IO(format!("Unable to write into file {}, {}", file_path, e)) })?; file.write_all(b" ").map_err(|e| { - ErrorKind::IO(format!("Unable to write into file {}, {}", file_path, e)) + Error::IO(format!("Unable to write into file {}, {}", file_path, e)) })?; file.write_all(item.value.as_bytes()).map_err(|e| { - ErrorKind::IO(format!("Unable to write into file {}, {}", file_path, e)) + Error::IO(format!("Unable to write into file {}, {}", file_path, e)) })?; file.write_all(b"\n").map_err(|e| { - ErrorKind::IO(format!("Unable to write into file {}, {}", file_path, e)) + Error::IO(format!("Unable to write into file {}, {}", file_path, e)) })?; } Ok(()) @@ -113,18 +113,18 @@ pub fn create_onion_service_sec_key_file( ) -> Result<(), Error> { let key_file_path = &format!("{}{}{}", os_directory, MAIN_SEPARATOR, SEC_KEY_FILE); let mut file = File::create(key_file_path) - .map_err(|e| ErrorKind::IO(format!("Unable to create file {}, {}", key_file_path, e)))?; + .map_err(|e| Error::IO(format!("Unable to create file {}, {}", key_file_path, e)))?; // Tag is always 32 bytes, so pad with null zeroes file.write(b"== ed25519v1-secret: type0 ==\0\0\0") .map_err(|e| { - ErrorKind::IO(format!( + Error::IO(format!( "Unable to write into file {}, {}", key_file_path, e )) })?; let expanded_skey: ExpandedSecretKey = ExpandedSecretKey::from(sec_key); file.write_all(&expanded_skey.to_bytes()).map_err(|e| { - ErrorKind::IO(format!( + Error::IO(format!( "Unable to write into file {}, {}", key_file_path, e )) @@ -135,9 +135,9 @@ pub fn create_onion_service_sec_key_file( pub fn create_sec_key_file(os_directory: &str, sec_key: &DalekSecretKey) -> Result<(), Error> { let key_file_path = &format!("{}{}{}", os_directory, MAIN_SEPARATOR, SEC_KEY_FILE_COPY); let mut file = File::create(key_file_path) - .map_err(|e| ErrorKind::IO(format!("Unable to create file {}, {}", key_file_path, e)))?; + .map_err(|e| Error::IO(format!("Unable to create file {}, {}", key_file_path, e)))?; file.write(&sec_key.to_bytes()).map_err(|e| { - ErrorKind::IO(format!( + Error::IO(format!( "Unable to write into file {}, {}", key_file_path, e )) @@ -145,23 +145,21 @@ pub fn create_sec_key_file(os_directory: &str, 
sec_key: &DalekSecretKey) -> Resu Ok(()) } -pub fn read_sec_key_file(os_directory: &str) -> Result { +pub fn read_sec_key_file(os_directory: &str, secp: &Secp256k1) -> Result { let key_file_path = &format!("{}{}{}", os_directory, MAIN_SEPARATOR, SEC_KEY_FILE_COPY); let mut file = File::open(key_file_path) - .map_err(|e| ErrorKind::IO(format!("Unable to create file {}, {}", key_file_path, e)))?; + .map_err(|e| Error::IO(format!("Unable to create file {}, {}", key_file_path, e)))?; let mut buf: [u8; SECRET_KEY_LENGTH] = [0; SECRET_KEY_LENGTH]; let sz = file .read(&mut buf) - .map_err(|e| ErrorKind::IO(format!("Unable to read from file {}, {}", key_file_path, e)))?; + .map_err(|e| Error::IO(format!("Unable to read from file {}, {}", key_file_path, e)))?; if sz != buf.len() { - return Err( - ErrorKind::IO(format!("Not found expected data at file {}", key_file_path)).into(), - ); + return Err(Error::IO(format!("Not found expected data at file {}", key_file_path)).into()); } - let sk = SecretKey::from_slice(&buf)?; + let sk = SecretKey::from_slice(secp, &buf)?; Ok(sk) } @@ -171,17 +169,17 @@ pub fn create_onion_service_pub_key_file( ) -> Result<(), Error> { let key_file_path = &format!("{}{}{}", os_directory, MAIN_SEPARATOR, PUB_KEY_FILE); let mut file = File::create(key_file_path) - .map_err(|e| ErrorKind::IO(format!("Unable to create file {}, {}", key_file_path, e)))?; + .map_err(|e| Error::IO(format!("Unable to create file {}, {}", key_file_path, e)))?; // Tag is always 32 bytes, so pad with null zeroes file.write(b"== ed25519v1-public: type0 ==\0\0\0") .map_err(|e| { - ErrorKind::IO(format!( + Error::IO(format!( "Unable to write into file {}, {}", key_file_path, e )) })?; file.write_all(pub_key.as_bytes()).map_err(|e| { - ErrorKind::IO(format!( + Error::IO(format!( "Fail to write data to file {}, {}", key_file_path, e )) @@ -192,16 +190,16 @@ pub fn create_onion_service_pub_key_file( pub fn create_onion_service_hostname_file(os_directory: &str, hostname: &str) -> Result<(), Error> { let file_path = &format!("{}{}{}", os_directory, MAIN_SEPARATOR, HOSTNAME_FILE); let mut file = File::create(file_path) - .map_err(|e| ErrorKind::IO(format!("Unable to create file {}, {}", file_path, e)))?; + .map_err(|e| Error::IO(format!("Unable to create file {}, {}", file_path, e)))?; file.write_all(&format!("{}.onion\n", hostname).as_bytes()) - .map_err(|e| ErrorKind::IO(format!("Fail to store data to file {}, {}", file_path, e)))?; + .map_err(|e| Error::IO(format!("Fail to store data to file {}, {}", file_path, e)))?; Ok(()) } pub fn create_onion_auth_clients_dir(os_directory: &str) -> Result<(), Error> { let auth_dir_path = &format!("{}{}{}", os_directory, MAIN_SEPARATOR, AUTH_CLIENTS_DIR); fs::create_dir_all(auth_dir_path) - .map_err(|e| ErrorKind::IO(format!("Unable to create dir {}, {}", auth_dir_path, e)))?; + .map_err(|e| Error::IO(format!("Unable to create dir {}, {}", auth_dir_path, e)))?; Ok(()) } /// output an onion service config for the secret key, and return the address @@ -210,7 +208,7 @@ pub fn output_onion_service_config( sec_key: &SecretKey, ) -> Result { let d_sec_key = DalekSecretKey::from_bytes(&sec_key.0) - .context(ErrorKind::ED25519Key("Unable to parse private key".into()))?; + .map_err(|e| Error::ED25519Key(format!("Unable to parse private key, {}", e)))?; let address = OnionV3Address::from_private(&sec_key.0)?; let hs_dir_file_path = format!( "{}{}{}{}{}", @@ -224,7 +222,7 @@ pub fn output_onion_service_config( // create directory if it doesn't exist 
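Every call site in this file repeats the same map_err shape; a hypothetical helper (not part of the patch) that would capture it once, with a stand-in for the crate's Error::IO variant:

use std::fs::File;
use std::io;

// Stand-in for the crate's Error::IO(String) variant used above.
#[derive(Debug)]
enum Error {
    IO(String),
}

// Hypothetical helper: wrap an io::Result with the
// "Unable to <action> <path>, <cause>" context this module repeats.
fn io_ctx<T>(res: io::Result<T>, action: &str, path: &str) -> Result<T, Error> {
    res.map_err(|e| Error::IO(format!("Unable to {} {}, {}", action, path, e)))
}

fn demo(file_path: &str) -> Result<File, Error> {
    io_ctx(File::create(file_path), "create file", file_path)
}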
fs::create_dir_all(&hs_dir_file_path) - .map_err(|e| ErrorKind::IO(format!("Unable to create dir {}, {}", hs_dir_file_path, e)))?; + .map_err(|e| Error::IO(format!("Unable to create dir {}, {}", hs_dir_file_path, e)))?; create_sec_key_file(&hs_dir_file_path, &d_sec_key)?; create_onion_service_sec_key_file(&hs_dir_file_path, &d_sec_key)?; @@ -292,7 +290,7 @@ pub fn output_tor_listener_config( // create data directory if it doesn't exist fs::create_dir_all(&tor_data_dir) - .map_err(|e| ErrorKind::IO(format!("Unable to create dir {}, {}", tor_data_dir, e)))?; + .map_err(|e| Error::IO(format!("Unable to create dir {}, {}", tor_data_dir, e)))?; let mut service_dirs = vec![]; @@ -322,7 +320,7 @@ pub fn output_tor_listener_config( pub fn _is_tor_address(input: &str) -> Result<(), Error> { match OnionV3Address::try_from(input) { Ok(_) => Ok(()), - Err(e) => Err(ErrorKind::NotOnion(format!("{}, {}", input, e)))?, + Err(e) => Err(Error::NotOnion(format!("{}, {}", input, e)))?, } } @@ -347,6 +345,7 @@ pub fn _complete_tor_address(input: &str) -> Result { mod tests { use super::*; + use grin_util::secp::ContextFlag; use rand::rngs::mock::StepRng; use crate::util::{self, secp}; @@ -364,8 +363,9 @@ mod tests { fn test_service_config() -> Result<(), Error> { let test_dir = "target/test_output/onion_service"; setup(test_dir); + let secp = Secp256k1::with_caps(ContextFlag::None); let mut test_rng = StepRng::new(1_234_567_890_u64, 1); - let sec_key = secp::key::SecretKey::new(&mut test_rng); + let sec_key = secp::key::SecretKey::new(&secp, &mut test_rng); output_onion_service_config(test_dir, &sec_key)?; clean_output_dir(test_dir); Ok(()) @@ -376,8 +376,9 @@ mod tests { global::set_local_chain_type(global::ChainTypes::AutomatedTesting); let test_dir = "./target/test_output/tor"; setup(test_dir); + let secp = Secp256k1::with_caps(ContextFlag::None); let mut test_rng = StepRng::new(1_234_567_890_u64, 1); - let sec_key = secp::key::SecretKey::new(&mut test_rng); + let sec_key = secp::key::SecretKey::new(&secp, &mut test_rng); output_tor_listener_config( test_dir, "127.0.0.1:3415", diff --git a/servers/src/tor/process.rs b/servers/src/tor/process.rs index a783c25c32..3770741619 100644 --- a/servers/src/tor/process.rs +++ b/servers/src/tor/process.rs @@ -47,7 +47,6 @@ extern crate chrono; -use failure::Fail; use regex::Regex; use std::fs::{self, File}; use std::io; @@ -64,25 +63,25 @@ const TOR_EXE_NAME: &str = "tor.exe"; #[cfg(not(windows))] const TOR_EXE_NAME: &str = "tor"; -#[derive(Fail, Debug)] +#[derive(thiserror::Error, Debug)] pub enum Error { - #[fail(display = "Tor process error, {}", _0)] + #[error("Tor process error, {0}")] Process(String), - #[fail(display = "Tor IO error, {}, {}", _0, _1)] + #[error("Tor IO error, {0}, {1}")] IO(String, io::Error), - #[fail(display = "Tor PID error, {}", _0)] + #[error("Tor PID error, {0}")] PID(String), - #[fail(display = "Tor Reported Error {}, and warnings: {:?}", _0, _1)] + #[error("Tor Reported Error {0}, and warnings: {1:?}")] Tor(String, Vec), - #[fail(display = "Tor invalid log line: {}", _0)] + #[error("Tor invalid log line: {0}")] InvalidLogLine(String), - #[fail(display = "Tor invalid bootstrap line: {}", _0)] + #[error("Tor invalid bootstrap line: {0}")] InvalidBootstrapLine(String), - #[fail(display = "Tor regex error {}, {}", _0, _1)] + #[error("Tor regex error {0}, {1}")] Regex(String, regex::Error), - #[fail(display = "Tor process not running")] + #[error("Tor process not running")] ProcessNotStarted, - #[fail(display = "Waiting for Tor respond 
timeout")] + #[error("Waiting for Tor respond timeout")] Timeout, } diff --git a/src/bin/cmd/client.rs b/src/bin/cmd/client.rs index a68bed5427..a74781e129 100644 --- a/src/bin/cmd/client.rs +++ b/src/bin/cmd/client.rs @@ -138,6 +138,26 @@ impl HTTPNodeClient { e.reset().unwrap(); } + pub fn reset_chain_head(&self, hash: String) { + let mut e = term::stdout().unwrap(); + let params = json!([hash]); + match self.send_json_request::<()>("reset_chain_head", ¶ms) { + Ok(_) => writeln!(e, "Successfully reset chain head {}", hash).unwrap(), + Err(_) => writeln!(e, "Failed to reset chain head {}", hash).unwrap(), + } + e.reset().unwrap(); + } + + pub fn invalidate_header(&self, hash: String) { + let mut e = term::stdout().unwrap(); + let params = json!([hash]); + match self.send_json_request::<()>("invalidate_header", ¶ms) { + Ok(_) => writeln!(e, "Successfully invalidated header: {}", hash).unwrap(), + Err(_) => writeln!(e, "Failed to invalidate header: {}", hash).unwrap(), + } + e.reset().unwrap(); + } + pub fn verify_chain(&self, assume_valid_rangeproofs_kernels: bool) { let mut e = term::stdout().unwrap(); let params = json!([assume_valid_rangeproofs_kernels]); @@ -193,6 +213,14 @@ pub fn client_command(client_args: &ArgMatches<'_>, global_config: GlobalConfig) ("listconnectedpeers", Some(_)) => { node_client.list_connected_peers(); } + ("resetchainhead", Some(args)) => { + let hash = args.value_of("hash").unwrap(); + node_client.reset_chain_head(hash.to_string()); + } + ("invalidateheader", Some(args)) => { + let hash = args.value_of("hash").unwrap(); + node_client.invalidate_header(hash.to_string()); + } ("verify-chain", Some(args)) => { let assume_valid_rangeproofs_kernels = args.is_present("fast"); node_client.verify_chain(assume_valid_rangeproofs_kernels); @@ -220,8 +248,9 @@ pub fn client_command(client_args: &ArgMatches<'_>, global_config: GlobalConfig) 0 } /// Error type wrapping underlying module errors. -#[derive(Debug)] +#[derive(Debug, thiserror::Error)] enum Error { /// RPC Error + #[error("RPC error: {0}")] RPCError(String), } diff --git a/src/bin/cmd/config.rs b/src/bin/cmd/config.rs index f6211a39c6..f78c34e98f 100644 --- a/src/bin/cmd/config.rs +++ b/src/bin/cmd/config.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/src/bin/cmd/mod.rs b/src/bin/cmd/mod.rs index 86078b7c76..9df255ce05 100644 --- a/src/bin/cmd/mod.rs +++ b/src/bin/cmd/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/src/bin/cmd/server.rs b/src/bin/cmd/server.rs index f51849a3b4..d982bdbd7f 100644 --- a/src/bin/cmd/server.rs +++ b/src/bin/cmd/server.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
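The failure-to-thiserror migration above is mechanical; a minimal sketch of the rule with toy variants (not the real enum):

use thiserror::Error;

#[derive(Error, Debug)]
enum ToyError {
    // #[fail(display = "Tor process error, {}", _0)] becomes:
    #[error("Tor process error, {0}")]
    Process(String),
    // ...and positional _0, _1 arguments become {0}, {1}:
    #[error("Tor IO error, {0}, {1}")]
    IO(String, std::io::Error),
}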
@@ -18,7 +18,6 @@ use std::thread; use std::time::Duration; use clap::ArgMatches; -use ctrlc; use futures::channel::oneshot; use crate::config::GlobalConfig; diff --git a/src/bin/grin.rs b/src/bin/grin.rs index a77c43826a..8b1f2dec16 100644 --- a/src/bin/grin.rs +++ b/src/bin/grin.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -126,9 +126,9 @@ fn real_main() -> i32 { } } - let mut config = node_config.clone().unwrap(); - let mut logging_config = config.members.as_mut().unwrap().logging.clone().unwrap(); - logging_config.tui_running = config.members.as_mut().unwrap().server.run_tui; + let config = node_config.clone().unwrap(); + let mut logging_config = config.members.as_ref().unwrap().logging.clone().unwrap(); + logging_config.tui_running = config.members.as_ref().unwrap().server.run_tui; let api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>) = Box::leak(Box::new(oneshot::channel::<()>())); @@ -152,9 +152,9 @@ fn real_main() -> i32 { log_build_info(); - // Initialize our global chain_type and feature flags (NRD kernel support currently). + // Initialize our global chain_type, feature flags (NRD kernel support currently), and accept_fee_base. // These are read via global and not read from config beyond this point. - global::init_global_chain_type(config.members.unwrap().server.chain_type); + global::init_global_chain_type(config.members.as_ref().unwrap().server.chain_type); info!("Chain: {:?}", global::get_chain_type()); match global::get_chain_type() { global::ChainTypes::Mainnet => { @@ -166,6 +166,16 @@ global::init_global_nrd_enabled(true); } } + global::init_global_accept_fee_base( + config + .members + .as_ref() + .unwrap() + .server + .pool_config + .accept_fee_base, + ); + info!("Accept Fee Base: {:?}", global::get_accept_fee_base()); log_feature_flags(); // Execute subcommand diff --git a/src/bin/mwc.yml b/src/bin/mwc.yml index cca84d4963..9a3f322ce7 100644 --- a/src/bin/mwc.yml +++ b/src/bin/mwc.yml @@ -1,5 +1,5 @@ -name: grin -version: "4.1.0" +name: mwc +version: "5.3.0" about: Lightweight implementation of the MimbleWimble protocol. author: The MWC Team @@ -77,6 +77,12 @@ subcommands: long: peer required: true takes_value: true + - resetchainhead: + about: Resets the local chain head + args: + - hash: + help: The header hash to reset to + required: true - verify-chain: about: Trigger a verification of the rangeproofs, kernel signatures and excesses. args: @@ -85,3 +91,9 @@ short: f long: fast takes_value: false + - invalidateheader: + about: Adds a header hash to the denylist + args: + - hash: + help: The header hash to invalidate + required: true diff --git a/src/bin/tui/constants.rs b/src/bin/tui/constants.rs index cc381c792a..75c31597ea 100644 --- a/src/bin/tui/constants.rs +++ b/src/bin/tui/constants.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
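The init-once pattern behind init_global_accept_fee_base above can be sketched with std's OnceLock (illustration only; the node's global module has its own holder type):

use std::sync::OnceLock;

// Set exactly once at startup from config; read-only everywhere after.
static ACCEPT_FEE_BASE: OnceLock<u64> = OnceLock::new();

fn init_global_accept_fee_base(afb: u64) {
    ACCEPT_FEE_BASE.set(afb).expect("accept_fee_base already initialized");
}

fn get_accept_fee_base() -> u64 {
    *ACCEPT_FEE_BASE.get().expect("accept_fee_base not initialized")
}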
diff --git a/src/bin/tui/logs.rs b/src/bin/tui/logs.rs index eeae777217..16e7d547eb 100644 --- a/src/bin/tui/logs.rs +++ b/src/bin/tui/logs.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ // limitations under the License. use cursive::theme::{BaseColor, Color, ColorStyle}; -use cursive::traits::Identifiable; +use cursive::traits::Nameable; use cursive::view::View; use cursive::views::ResizedView; use cursive::{Cursive, Printer}; diff --git a/src/bin/tui/menu.rs b/src/bin/tui/menu.rs index 3711ff4b18..41de91adf3 100644 --- a/src/bin/tui/menu.rs +++ b/src/bin/tui/menu.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,7 +17,7 @@ use cursive::align::HAlign; use cursive::direction::Orientation; use cursive::event::Key; -use cursive::view::Identifiable; +use cursive::view::Nameable; use cursive::view::View; use cursive::views::{ LinearLayout, OnEventView, ResizedView, SelectView, StackView, TextView, ViewRef, diff --git a/src/bin/tui/mining.rs b/src/bin/tui/mining.rs index 75e0f675d6..6ab7f0c929 100644 --- a/src/bin/tui/mining.rs +++ b/src/bin/tui/mining.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,10 +16,10 @@ use std::cmp::Ordering; -use crate::tui::chrono::prelude::{DateTime, NaiveDateTime, Utc}; +use chrono::prelude::{DateTime, Utc}; use cursive::direction::Orientation; use cursive::event::Key; -use cursive::traits::{Boxable, Identifiable}; +use cursive::traits::{Nameable, Resizable}; use cursive::view::View; use cursive::views::{ Button, Dialog, LinearLayout, OnEventView, Panel, ResizedView, StackView, TextView, @@ -34,7 +34,7 @@ use crate::tui::constants::{ use crate::tui::types::TUIStatusListener; use crate::servers::{DiffBlock, ServerStats, WorkerStats}; -use crate::tui::table::{TableView, TableViewItem}; +use cursive_table_view::{TableView, TableViewItem}; #[derive(Copy, Clone, PartialEq, Eq, Hash)] enum StratumWorkerColumn { @@ -51,7 +51,7 @@ enum StratumWorkerColumn { impl StratumWorkerColumn { fn _as_str(&self) -> &str { match *self { - StratumWorkerColumn::Id => "Worker ID", + StratumWorkerColumn::Id => "ID", StratumWorkerColumn::IsConnected => "Connected", StratumWorkerColumn::LastSeen => "Last Seen", StratumWorkerColumn::PowDifficulty => "PowDifficulty", @@ -65,14 +65,15 @@ impl StratumWorkerColumn { impl TableViewItem for WorkerStats { fn to_column(&self, column: StratumWorkerColumn) -> String { - let naive_datetime = NaiveDateTime::from_timestamp( + let datetime = DateTime::from_timestamp( self.last_seen .duration_since(time::UNIX_EPOCH) .unwrap() .as_secs() as i64, 0, - ); - let datetime: DateTime = DateTime::from_utc(naive_datetime, Utc); + ) + .unwrap_or_default() + .to_utc(); match column { StratumWorkerColumn::Id => self.id.clone(), @@ -86,19 +87,21 @@ impl TableViewItem for WorkerStats { } } - fn cmp(&self, _other: &Self, column: StratumWorkerColumn) -> Ordering + fn cmp(&self, other: &Self, column: StratumWorkerColumn) -> Ordering where Self: Sized, { match column { - 
StratumWorkerColumn::Id => Ordering::Equal, - StratumWorkerColumn::IsConnected => Ordering::Equal, - StratumWorkerColumn::LastSeen => Ordering::Equal, - StratumWorkerColumn::PowDifficulty => Ordering::Equal, - StratumWorkerColumn::NumAccepted => Ordering::Equal, - StratumWorkerColumn::NumRejected => Ordering::Equal, - StratumWorkerColumn::NumStale => Ordering::Equal, - StratumWorkerColumn::NumBlocksFound => Ordering::Equal, + StratumWorkerColumn::Id => self.id.cmp(&other.id), + StratumWorkerColumn::IsConnected => self.is_connected.cmp(&other.is_connected), + StratumWorkerColumn::LastSeen => self.last_seen.cmp(&other.last_seen), + StratumWorkerColumn::PowDifficulty => self.pow_difficulty.cmp(&other.pow_difficulty), + StratumWorkerColumn::NumAccepted => self.num_accepted.cmp(&other.num_accepted), + StratumWorkerColumn::NumRejected => self.num_rejected.cmp(&other.num_rejected), + StratumWorkerColumn::NumStale => self.num_stale.cmp(&other.num_stale), + StratumWorkerColumn::NumBlocksFound => { + self.num_blocks_found.cmp(&other.num_blocks_found) + } } } } @@ -106,9 +109,7 @@ impl TableViewItem for WorkerStats { enum DiffColumn { Height, Hash, - PoWType, Difficulty, - SecondaryScaling, Time, Duration, } @@ -118,9 +119,7 @@ impl DiffColumn { match *self { DiffColumn::Height => "Height", DiffColumn::Hash => "Hash", - DiffColumn::PoWType => "Type", DiffColumn::Difficulty => "Network Difficulty", - DiffColumn::SecondaryScaling => "Sec. Scaling", DiffColumn::Time => "Block Time", DiffColumn::Duration => "Duration", } @@ -129,37 +128,29 @@ impl DiffColumn { impl TableViewItem for DiffBlock { fn to_column(&self, column: DiffColumn) -> String { - let naive_datetime = NaiveDateTime::from_timestamp(self.time as i64, 0); - let datetime: DateTime = DateTime::from_utc(naive_datetime, Utc); - let pow_type = if self.is_secondary { - String::from("Secondary") - } else { - String::from("Primary") - }; + let datetime: DateTime = DateTime::from_timestamp(self.time as i64, 0) + .unwrap_or_default() + .to_utc(); match column { DiffColumn::Height => self.block_height.to_string(), DiffColumn::Hash => self.block_hash.to_string(), - DiffColumn::PoWType => pow_type, DiffColumn::Difficulty => self.difficulty.to_string(), - DiffColumn::SecondaryScaling => self.secondary_scaling.to_string(), DiffColumn::Time => format!("{}", datetime), DiffColumn::Duration => format!("{}s", self.duration), } } - fn cmp(&self, _other: &Self, column: DiffColumn) -> Ordering + fn cmp(&self, other: &Self, column: DiffColumn) -> Ordering where Self: Sized, { match column { - DiffColumn::Height => Ordering::Equal, - DiffColumn::Hash => Ordering::Equal, - DiffColumn::PoWType => Ordering::Equal, - DiffColumn::Difficulty => Ordering::Equal, - DiffColumn::SecondaryScaling => Ordering::Equal, - DiffColumn::Time => Ordering::Equal, - DiffColumn::Duration => Ordering::Equal, + DiffColumn::Height => self.block_height.cmp(&other.block_height), + DiffColumn::Hash => self.block_hash.cmp(&other.block_hash), + DiffColumn::Difficulty => self.difficulty.cmp(&other.difficulty), + DiffColumn::Time => self.time.cmp(&other.time), + DiffColumn::Duration => self.duration.cmp(&other.duration), } } } @@ -186,29 +177,31 @@ impl TUIMiningView { .child(Panel::new(devices_button)) .child(Panel::new(difficulty_button)); - let table_view = TableView::::new() - .column(StratumWorkerColumn::Id, "Worker ID", |c| c.width_percent(8)) + let mut table_view = TableView::::new() + .column(StratumWorkerColumn::Id, "ID", |c| c.width_percent(6)) .column(StratumWorkerColumn::IsConnected, 
"Connected", |c| { - c.width_percent(8) + c.width_percent(14) }) .column(StratumWorkerColumn::LastSeen, "Last Seen", |c| { - c.width_percent(16) - }) - .column(StratumWorkerColumn::PowDifficulty, "Pow Difficulty", |c| { - c.width_percent(12) + c.width_percent(20) }) - .column(StratumWorkerColumn::NumAccepted, "Num Accepted", |c| { + .column(StratumWorkerColumn::PowDifficulty, "Difficulty", |c| { c.width_percent(10) }) - .column(StratumWorkerColumn::NumRejected, "Num Rejected", |c| { - c.width_percent(10) + .column(StratumWorkerColumn::NumAccepted, "Accepted", |c| { + c.width_percent(5) }) - .column(StratumWorkerColumn::NumStale, "Num Stale", |c| { - c.width_percent(10) + .column(StratumWorkerColumn::NumRejected, "Rejected", |c| { + c.width_percent(5) + }) + .column(StratumWorkerColumn::NumStale, "Stale", |c| { + c.width_percent(5) }) .column(StratumWorkerColumn::NumBlocksFound, "Blocks Found", |c| { - c.width_percent(10) - }); + c.width_percent(35) + }) + .default_column(StratumWorkerColumn::IsConnected); + table_view.sort_by(StratumWorkerColumn::IsConnected, Ordering::Greater); let status_view = LinearLayout::new(Orientation::Vertical) .child( @@ -229,15 +222,15 @@ impl TUIMiningView { ) .child( LinearLayout::new(Orientation::Horizontal) - .child(TextView::new(" ").with_name("stratum_network_difficulty_status")), + .child(TextView::new(" ").with_name("stratum_blocks_found_status")), ) .child( LinearLayout::new(Orientation::Horizontal) - .child(TextView::new(" ").with_name("stratum_network_hashrate")), + .child(TextView::new(" ").with_name("stratum_network_difficulty_status")), ) .child( LinearLayout::new(Orientation::Horizontal) - .child(TextView::new(" ").with_name("stratum_edge_bits_status")), + .child(TextView::new(" ").with_name("stratum_network_hashrate")), ); let mining_device_view = LinearLayout::new(Orientation::Vertical) @@ -271,17 +264,14 @@ impl TUIMiningView { ); let diff_table_view = TableView::::new() - .column(DiffColumn::Height, "Height", |c| c.width_percent(10)) - .column(DiffColumn::Hash, "Hash", |c| c.width_percent(10)) - .column(DiffColumn::PoWType, "Type", |c| c.width_percent(10)) + .column(DiffColumn::Height, "Height", |c| c.width_percent(15)) + .column(DiffColumn::Hash, "Hash", |c| c.width_percent(15)) .column(DiffColumn::Difficulty, "Network Difficulty", |c| { c.width_percent(15) }) - .column(DiffColumn::SecondaryScaling, "Sec. 
Scaling", |c| { - c.width_percent(10) - }) - .column(DiffColumn::Time, "Block Time", |c| c.width_percent(25)) - .column(DiffColumn::Duration, "Duration", |c| c.width_percent(25)); + .column(DiffColumn::Time, "Block Time", |c| c.width_percent(30)) + .column(DiffColumn::Duration, "Duration", |c| c.width_percent(25)) + .default_column(DiffColumn::Height); let mining_difficulty_view = LinearLayout::new(Orientation::Vertical) .child(diff_status_view) @@ -338,12 +328,6 @@ impl TUIStatusListener for TUIMiningView { }, ); let stratum_stats = &stats.stratum_stats; - let stratum_network_hashrate = format!( - "Network Hashrate: {:.*}", - 2, - stratum_stats - .network_hashrate(stratum_stats.block_height.load(atomic::Ordering::Relaxed)) - ); let worker_stats = stratum_stats.get_worker_stats(); let stratum_enabled = format!( "Mining server enabled: {}", @@ -353,24 +337,39 @@ impl TUIStatusListener for TUIMiningView { "Mining server running: {}", stratum_stats.is_running.load(atomic::Ordering::Relaxed) ); - let stratum_num_workers = format!( - "Number of workers: {}", - stratum_stats.num_workers.load(atomic::Ordering::Relaxed) - ); - let stratum_block_height = format!( - "Solving Block Height: {}", - stratum_stats.block_height.load(atomic::Ordering::Relaxed) - ); - let stratum_network_difficulty = format!( - "Network Difficulty: {}", - stratum_stats - .network_difficulty - .load(atomic::Ordering::Relaxed) - ); - let stratum_edge_bits = format!( - "Cuckoo Size: {}", - stratum_stats.edge_bits.load(atomic::Ordering::Relaxed) + let num_workers = stratum_stats.num_workers.load(atomic::Ordering::Relaxed); + let stratum_num_workers = format!("Active workers: {}", num_workers); + let stratum_blocks_found = format!( + "Blocks Found: {}", + stratum_stats.blocks_found.load(atomic::Ordering::Relaxed) ); + let stratum_block_height = match num_workers { + 0 => "Solving Block Height: n/a".to_string(), + _ => format!( + "Solving Block Height: {}", + stratum_stats.block_height.load(atomic::Ordering::Relaxed) + ), + }; + let stratum_network_difficulty = match num_workers { + 0 => "Network Difficulty: n/a".to_string(), + _ => format!( + "Network Difficulty: {}", + stratum_stats + .network_difficulty + .load(atomic::Ordering::Relaxed) + ), + }; + let stratum_network_hashrate = match num_workers { + 0 => "Network Hashrate: n/a".to_string(), + _ => format!( + "Network Hashrate C{}: {:.*}", + stratum_stats.edge_bits.load(atomic::Ordering::Relaxed), + 2, + stratum_stats + .network_hashrate + .load(atomic::Ordering::Relaxed) + ), + }; c.call_on_name("stratum_config_status", |t: &mut TextView| { t.set_content(stratum_enabled); @@ -381,6 +380,9 @@ impl TUIStatusListener for TUIMiningView { c.call_on_name("stratum_num_workers_status", |t: &mut TextView| { t.set_content(stratum_num_workers); }); + c.call_on_name("stratum_blocks_found_status", |t: &mut TextView| { + t.set_content(stratum_blocks_found); + }); c.call_on_name("stratum_block_height_status", |t: &mut TextView| { t.set_content(stratum_block_height); }); @@ -390,13 +392,10 @@ impl TUIStatusListener for TUIMiningView { c.call_on_name("stratum_network_hashrate", |t: &mut TextView| { t.set_content(stratum_network_hashrate); }); - c.call_on_name("stratum_edge_bits_status", |t: &mut TextView| { - t.set_content(stratum_edge_bits); - }); let _ = c.call_on_name( TABLE_MINING_STATUS, |t: &mut TableView| { - t.set_items(worker_stats); + t.set_items_stable(worker_stats); }, ); } diff --git a/src/bin/tui/mod.rs b/src/bin/tui/mod.rs index 367db379c4..2cdf0c8865 100644 --- 
a/src/bin/tui/mod.rs +++ b/src/bin/tui/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,17 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Grin TUI -use chrono; -use humansize; -// mod constants; mod logs; mod menu; mod mining; mod peers; mod status; -pub mod table; mod types; pub mod ui; mod version; diff --git a/src/bin/tui/peers.rs b/src/bin/tui/peers.rs index adb4db057f..ae862c95ec 100644 --- a/src/bin/tui/peers.rs +++ b/src/bin/tui/peers.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,19 +18,19 @@ use std::cmp::Ordering; use crate::servers::{PeerStats, ServerStats}; -use crate::tui::humansize::{file_size_opts::CONVENTIONAL, FileSize}; use chrono::prelude::*; +use humansize::{file_size_opts::CONVENTIONAL, FileSize}; use cursive::direction::Orientation; use cursive::event::Key; -use cursive::traits::{Boxable, Identifiable}; +use cursive::traits::{Nameable, Resizable}; use cursive::view::View; use cursive::views::{Dialog, LinearLayout, OnEventView, ResizedView, TextView}; use cursive::Cursive; use crate::tui::constants::{MAIN_MENU, TABLE_PEER_STATUS, VIEW_PEER_SYNC}; -use crate::tui::table::{TableView, TableViewItem}; use crate::tui::types::TUIStatusListener; +use cursive_table_view::{TableView, TableViewItem}; #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub enum PeerColumn { @@ -41,6 +41,7 @@ pub enum PeerColumn { Direction, Version, UserAgent, + Capabilities, } impl PeerColumn { @@ -53,6 +54,7 @@ impl PeerColumn { PeerColumn::TotalDifficulty => "Total Difficulty", PeerColumn::Direction => "Direction", PeerColumn::UserAgent => "User Agent", + PeerColumn::Capabilities => "Capabilities", } } } @@ -82,6 +84,7 @@ impl TableViewItem for PeerStats { PeerColumn::Direction => self.direction.clone(), PeerColumn::Version => format!("{}", self.version), PeerColumn::UserAgent => self.user_agent.clone(), + PeerColumn::Capabilities => format!("{}", self.capabilities.bits()), } } @@ -115,6 +118,10 @@ impl TableViewItem for PeerStats { PeerColumn::Direction => self.direction.cmp(&other.direction).then(sort_by_addr()), PeerColumn::Version => self.version.cmp(&other.version).then(sort_by_addr()), PeerColumn::UserAgent => self.user_agent.cmp(&other.user_agent).then(sort_by_addr()), + PeerColumn::Capabilities => self + .capabilities + .cmp(&other.capabilities) + .then(sort_by_addr()), } } } @@ -133,7 +140,8 @@ impl TUIPeerView { .column(PeerColumn::TotalDifficulty, "Total Difficulty", |c| { c.width_percent(24) }) - .column(PeerColumn::Version, "Proto", |c| c.width_percent(6)) + .column(PeerColumn::Version, "Proto", |c| c.width_percent(4)) + .column(PeerColumn::Capabilities, "Capab", |c| c.width_percent(4)) .column(PeerColumn::UserAgent, "User Agent", |c| c.width_percent(18)); let peer_status_view = ResizedView::with_full_screen( LinearLayout::new(Orientation::Vertical) @@ -182,7 +190,7 @@ impl TUIStatusListener for TUIPeerView { let _ = c.call_on_name( TABLE_PEER_STATUS, |t: &mut TableView| { - t.set_items(stats.peer_stats.clone()); + t.set_items_stable(stats.peer_stats.clone()); }, ); let _ = c.call_on_name("peers_total", |t: &mut TextView| { diff 
--git a/src/bin/tui/status.rs b/src/bin/tui/status.rs index 7509a27051..8538e63458 100644 --- a/src/bin/tui/status.rs +++ b/src/bin/tui/status.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ use chrono::prelude::Utc; use cursive::direction::Orientation; -use cursive::traits::Identifiable; +use cursive::traits::Nameable; use cursive::view::View; use cursive::views::{LinearLayout, ResizedView, TextView}; use cursive::Cursive; @@ -39,21 +39,43 @@ impl TUIStatusView { SyncStatus::NoSync => Cow::Borrowed("Running"), SyncStatus::AwaitingPeers(_) => Cow::Borrowed("Waiting for peers"), SyncStatus::HeaderSync { - current_height, + sync_head, highest_height, + .. } => { let percent = if highest_height == 0 { 0 } else { - current_height * 100 / highest_height + sync_head.height * 100 / highest_height }; Cow::Owned(format!("Sync step 1/7: Downloading headers: {}%", percent)) } + SyncStatus::TxHashsetPibd { + aborted: _, + errored: _, + completed_leaves, + leaves_required, + completed_to_height: _, + required_height: _, + } => { + let percent = if completed_leaves == 0 { + 0 + } else { + completed_leaves * 100 / leaves_required + }; + Cow::Owned(format!( + "Sync step 2/7: Downloading Tx state (PIBD) - {} / {} entries - {}%", + completed_leaves, leaves_required, percent + )) + } SyncStatus::TxHashsetDownload(stat) => { if stat.total_size > 0 { let percent = stat.downloaded_size * 100 / stat.total_size; - let start = stat.prev_update_time.timestamp_nanos(); - let fin = Utc::now().timestamp_nanos(); + let start = stat + .prev_update_time + .timestamp_nanos_opt() + .unwrap_or_default(); + let fin = Utc::now().timestamp_nanos_opt().unwrap_or_default(); let dur_ms = (fin - start) as f64 * NANO_TO_MILLIS; Cow::Owned(format!("Sync step 2/7: Downloading {}(MB) chain state for state sync: {}% at {:.1?}(kB/s)", @@ -71,8 +93,31 @@ impl TUIStatusView { )) } } - SyncStatus::TxHashsetSetup => { - Cow::Borrowed("Sync step 3/7: Preparing chain state for validation") + SyncStatus::TxHashsetSetup { + headers, + headers_total, + kernel_pos, + kernel_pos_total, + } => { + if headers.is_some() && headers_total.is_some() { + let h = headers.unwrap(); + let ht = headers_total.unwrap(); + let percent = h * 100 / ht; + Cow::Owned(format!( + "Sync step 3/7: Preparing for validation (kernel history) - {}/{} - {}%", + h, ht, percent + )) + } else if kernel_pos.is_some() && kernel_pos_total.is_some() { + let k = kernel_pos.unwrap(); + let kt = kernel_pos_total.unwrap(); + let percent = k * 100 / kt; + Cow::Owned(format!( + "Sync step 3/7: Preparing for validation (kernel position) - {}/{} - {}%", + k, kt, percent + )) + } else { + Cow::Borrowed("Sync step 3/7: Preparing chain state for validation") + } } SyncStatus::TxHashsetRangeProofsValidation { rproofs, diff --git a/src/bin/tui/table.rs b/src/bin/tui/table.rs deleted file mode 100644 index b619792959..0000000000 --- a/src/bin/tui/table.rs +++ /dev/null @@ -1,1066 +0,0 @@ -// Copyright 2020 The Grin Developers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
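The `TxHashsetSetup` arm above pairs two `Option`s and unwraps them after an `is_some()` check. A minimal standalone sketch of the same progress formatting, using a hypothetical `setup_progress` helper and an added zero-total guard (the patch itself divides directly):

fn setup_progress(done: Option<u64>, total: Option<u64>, label: &str) -> String {
	if let (Some(d), Some(t)) = (done, total) {
		// Guard the division; the patch assumes a non-zero total here.
		let percent = if t == 0 { 0 } else { d * 100 / t };
		format!(
			"Sync step 3/7: Preparing for validation ({}) - {}/{} - {}%",
			label, d, t, percent
		)
	} else {
		"Sync step 3/7: Preparing chain state for validation".to_string()
	}
}

// setup_progress(Some(50), Some(200), "kernel history")
// => "Sync step 3/7: Preparing for validation (kernel history) - 50/200 - 25%"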
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Copyright (c) 2015-2017 Ivo Wetzel -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Adapted from https://github.com/behnam/rust-cursive-table-view -//! A basic table view implementation for [cursive](https://crates.io/crates/cursive). - -#![deny( - missing_docs, - missing_copy_implementations, - trivial_casts, - trivial_numeric_casts, - unsafe_code, - unused_import_braces, - unused_qualifications -)] - -// Crate Dependencies --------------------------------------------------------- -use cursive; - -// STD Dependencies ----------------------------------------------------------- -use std::cmp::{self, Ordering}; -use std::collections::HashMap; -use std::hash::Hash; -use std::rc::Rc; - -// External Dependencies ------------------------------------------------------ -use cursive::align::HAlign; -use cursive::direction::Direction; -use cursive::event::{Callback, Event, EventResult, Key}; -use cursive::theme::ColorStyle; -use cursive::theme::PaletteColor::{Highlight, HighlightInactive, Primary}; -use cursive::vec::Vec2; -use cursive::view::{ScrollBase, View}; -use cursive::With; -use cursive::{Cursive, Printer}; - -/// A trait for displaying and sorting items inside a -/// [`TableView`](struct.TableView.html). -pub trait TableViewItem: Clone + Sized -where - H: Eq + Hash + Copy + Clone + 'static, -{ - /// Method returning a string representation of the item for the - /// specified column from type `H`. - fn to_column(&self, column: H) -> String; - - /// Method comparing two items via their specified column from type `H`. - fn cmp(&self, other: &Self, column: H) -> Ordering - where - Self: Sized; -} - -/// View to select an item among a list, supporting multiple columns for -/// sorting. 
-/// -/// # Examples -/// -/// ```rust -/// # extern crate cursive; -/// # extern crate cursive_table_view; -/// # use std::cmp::Ordering; -/// # use cursive_table_view::{TableView, TableViewItem}; -/// # use cursive::align::HAlign; -/// # fn main() { -/// // Provide a type for the table's columns -/// #[derive(Copy, Clone, PartialEq, Eq, Hash)] -/// enum BasicColumn { -/// Name, -/// Count, -/// Rate -/// } -/// -/// // Define the item type -/// #[derive(Clone, Debug)] -/// struct Foo { -/// name: String, -/// count: usize, -/// rate: usize -/// } -/// -/// impl TableViewItem for Foo { -/// -/// fn to_column(&self, column: BasicColumn) -> String { -/// match column { -/// BasicColumn::Name => self.name.to_string(), -/// BasicColumn::Count => format!("{}", self.count), -/// BasicColumn::Rate => format!("{}", self.rate) -/// } -/// } -/// -/// fn cmp(&self, other: &Self, column: BasicColumn) -> Ordering where Self: Sized { -/// match column { -/// BasicColumn::Name => self.name.cmp(&other.name), -/// BasicColumn::Count => self.count.cmp(&other.count), -/// BasicColumn::Rate => self.rate.cmp(&other.rate) -/// } -/// } -/// -/// } -/// -/// // Configure the actual table -/// let table = TableView::::new() -/// .column(BasicColumn::Name, "Name", |c| c.width(20)) -/// .column(BasicColumn::Count, "Count", |c| c.align(HAlign::Center)) -/// .column(BasicColumn::Rate, "Rate", |c| { -/// c.ordering(Ordering::Greater).align(HAlign::Right).width(20) -/// }) -/// .default_column(BasicColumn::Name); -/// # } -/// ``` -pub struct TableView + PartialEq, H: Eq + Hash + Copy + Clone + 'static> { - enabled: bool, - scrollbase: ScrollBase, - last_size: Vec2, - - column_select: bool, - columns: Vec>, - column_indices: HashMap, - - focus: usize, - items: Vec, - rows_to_items: Vec, - - on_sort: Option>, - // TODO Pass drawing offsets into the handlers so a popup menu - // can be created easily? - on_submit: Option>, - on_select: Option>, -} - -impl + PartialEq, H: Eq + Hash + Copy + Clone + 'static> TableView { - /// Creates a new empty `TableView` without any columns. - /// - /// A TableView should be accompanied by a enum of type `H` representing - /// the table columns. - pub fn new() -> Self { - Self { - enabled: true, - scrollbase: ScrollBase::new(), - last_size: Vec2::new(0, 0), - - column_select: false, - columns: Vec::new(), - column_indices: HashMap::new(), - - focus: 0, - items: Vec::new(), - rows_to_items: Vec::new(), - - on_sort: None, - on_submit: None, - on_select: None, - } - } - - /// Adds a column for the specified table column from type `H` along with - /// a title for its visual display. - /// - /// The provided callback can be used to further configure the - /// created [`TableColumn`](struct.TableColumn.html). - pub fn column, C: FnOnce(TableColumn) -> TableColumn>( - mut self, - column: H, - title: S, - callback: C, - ) -> Self { - self.column_indices.insert(column, self.columns.len()); - self.columns - .push(callback(TableColumn::new(column, title.into()))); - - // Make the first column the default one - if self.columns.len() == 1 { - self.default_column(column) - } else { - self - } - } - - /// Sets the initially active column of the table. 
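The doc-comment example above lost its type parameters; spelled out explicitly, the same builder usage reads as follows (a sketch assuming the `TableView`/`TableViewItem` definitions from this module; note `T` must also be `PartialEq` per the struct bounds):

use std::cmp::Ordering;

#[derive(Copy, Clone, PartialEq, Eq, Hash)]
enum BasicColumn {
	Name,
	Count,
}

#[derive(Clone, Debug, PartialEq)]
struct Foo {
	name: String,
	count: usize,
}

impl TableViewItem<BasicColumn> for Foo {
	fn to_column(&self, column: BasicColumn) -> String {
		match column {
			BasicColumn::Name => self.name.clone(),
			BasicColumn::Count => format!("{}", self.count),
		}
	}

	fn cmp(&self, other: &Self, column: BasicColumn) -> Ordering {
		match column {
			BasicColumn::Name => self.name.cmp(&other.name),
			BasicColumn::Count => self.count.cmp(&other.count),
		}
	}
}

// Columns are declared fluently; the first column added becomes the default
// sort column unless `default_column` overrides it.
let table = TableView::<Foo, BasicColumn>::new()
	.column(BasicColumn::Name, "Name", |c| c.width(20))
	.column(BasicColumn::Count, "Count", |c| c.ordering(Ordering::Greater))
	.default_column(BasicColumn::Name);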
- pub fn default_column(mut self, column: H) -> Self { - if self.column_indices.contains_key(&column) { - for c in &mut self.columns { - c.selected = c.column == column; - if c.selected { - c.order = c.default_order; - } else { - c.order = Ordering::Equal; - } - } - } - self - } - - /// Sorts the table using the specified table `column` and the passed - /// `order`. - pub fn sort_by(&mut self, column: H, order: Ordering) { - if self.column_indices.contains_key(&column) { - for c in &mut self.columns { - if c.column == column { - c.order = order; - } else { - c.order = Ordering::Equal; - } - } - } - - self.sort_items(column, order); - } - - /// Sorts the table using the currently active column and its - /// ordering. - pub fn sort(&mut self) { - if let Some((column, order)) = self.order() { - self.sort_items(column, order); - } - } - - /// Returns the currently active column that is used for sorting - /// along with its ordering. - /// - /// Might return `None` if there are currently no items in the table - /// and it has not been sorted yet. - pub fn order(&self) -> Option<(H, Ordering)> { - for c in &self.columns { - if c.order != Ordering::Equal { - return Some((c.column, c.order)); - } - } - None - } - - /// Disables this view. - /// - /// A disabled view cannot be selected. - pub fn disable(&mut self) { - self.enabled = false; - } - - /// Re-enables this view. - pub fn enable(&mut self) { - self.enabled = true; - } - - /// Enable or disable this view. - pub fn set_enabled(&mut self, enabled: bool) { - self.enabled = enabled; - } - - /// Returns `true` if this view is enabled. - pub fn is_enabled(&self) -> bool { - self.enabled - } - - /// Sets a callback to be used when a selected column is sorted by - /// pressing ``. - /// - /// # Example - /// - /// ```norun - /// rt(|siv: &mut Cursive, column: BasicColumn, order: Ordering| {}); - /// ``` - pub fn set_on_sort(&mut self, cb: F) - where - F: Fn(&mut Cursive, H, Ordering) + 'static, - { - self.on_sort = Some(Rc::new(move |s, h, o| cb(s, h, o))); - } - - /// Sets a callback to be used when a selected column is sorted by - /// pressing ``. - /// - /// Chainable variant. - /// - /// # Example - /// - /// ```norun - /// siv: &mut Cursive, column: BasicColumn, order: Ordering| {}); - /// ``` - pub fn on_sort(self, cb: F) -> Self - where - F: Fn(&mut Cursive, H, Ordering) + 'static, - { - self.with(|t| t.set_on_sort(cb)) - } - - /// Sets a callback to be used when `` is pressed while an item - /// is selected. - /// - /// Both the currently selected row and the index of the corresponding item - /// within the underlying storage vector will be given to the callback. - /// - /// # Example - /// - /// ```norun - /// bmit(|siv: &mut Cursive, row: usize, index: usize| {}); - /// ``` - pub fn set_on_submit(&mut self, cb: F) - where - F: Fn(&mut Cursive, usize, usize) + 'static, - { - self.on_submit = Some(Rc::new(move |s, row, index| cb(s, row, index))); - } - - /// Sets a callback to be used when `` is pressed while an item - /// is selected. - /// - /// Both the currently selected row and the index of the corresponding item - /// within the underlying storage vector will be given to the callback. - /// - /// Chainable variant. - /// - /// # Example - /// - /// ```norun - /// (|siv: &mut Cursive, row: usize, index: usize| {}); - /// ``` - pub fn on_submit(self, cb: F) -> Self - where - F: Fn(&mut Cursive, usize, usize) + 'static, - { - self.with(|t| t.set_on_submit(cb)) - } - - /// Sets a callback to be used when an item is selected. 
- /// - /// Both the currently selected row and the index of the corresponding item - /// within the underlying storage vector will be given to the callback. - /// - /// # Example - /// - /// ```norun - /// lect(|siv: &mut Cursive, row: usize, index: usize| {}); - /// ``` - pub fn set_on_select(&mut self, cb: F) - where - F: Fn(&mut Cursive, usize, usize) + 'static, - { - self.on_select = Some(Rc::new(move |s, row, index| cb(s, row, index))); - } - - /// Sets a callback to be used when an item is selected. - /// - /// Both the currently selected row and the index of the corresponding item - /// within the underlying storage vector will be given to the callback. - /// - /// Chainable variant. - /// - /// # Example - /// - /// ```norun - /// (|siv: &mut Cursive, row: usize, index: usize| {}); - /// ``` - pub fn on_select(self, cb: F) -> Self - where - F: Fn(&mut Cursive, usize, usize) + 'static, - { - self.with(|t| t.set_on_select(cb)) - } - - /// Removes all items from this view. - pub fn clear(&mut self) { - self.items.clear(); - self.rows_to_items.clear(); - self.focus = 0; - } - - /// Returns the number of items in this table. - pub fn len(&self) -> usize { - self.items.len() - } - - /// Returns `true` if this table has no items. - pub fn is_empty(&self) -> bool { - self.items.is_empty() - } - - /// Returns the index of the currently selected table row. - pub fn row(&self) -> Option { - if self.items.is_empty() { - None - } else { - Some(self.focus) - } - } - - /// Selects the row at the specified index. - pub fn set_selected_row(&mut self, row_index: usize) { - self.focus = row_index; - self.scrollbase.scroll_to(row_index); - } - - /// Selects the row at the specified index. - /// - /// Chainable variant. - pub fn selected_row(self, row_index: usize) -> Self { - self.with(|t| t.set_selected_row(row_index)) - } - - /// Sets the contained items of the table. - /// - /// The currently active sort order is preserved and will be applied to all - /// items. The selected item will also be preserved. - pub fn set_items(&mut self, items: Vec) { - let mut new_location = 0; - if let Some(old_item_location) = self.item() { - let old_item = self.items.get(old_item_location).unwrap(); - for (i, new_item) in items.iter().enumerate() { - if old_item == new_item { - new_location = i; - break; - } - } - } - - self.items = items; - self.rows_to_items = Vec::with_capacity(self.items.len()); - - for i in 0..self.items.len() { - self.rows_to_items.push(i); - } - - if let Some((column, order)) = self.order() { - self.sort_by(column, order); - } - - self.scrollbase - .set_heights(self.last_size.y.saturating_sub(2), self.rows_to_items.len()); - - self.set_selected_item(new_location); - } - - /// Sets the contained items of the table. - /// - /// The order of the items will be preserved even when the table is sorted. - /// - /// Chainable variant. - pub fn items(self, items: Vec) -> Self { - self.with(|t| t.set_items(items)) - } - - /// Returns a immutable reference to the item at the specified index - /// within the underlying storage vector. - pub fn borrow_item(&mut self, index: usize) -> Option<&T> { - self.items.get(index) - } - - /// Returns a mutable reference to the item at the specified index within - /// the underlying storage vector. - pub fn borrow_item_mut(&mut self, index: usize) -> Option<&mut T> { - self.items.get_mut(index) - } - - /// Returns a immutable reference to the items contained within the table. 
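`set_items` above preserves the selected item by an equality lookup in the new vector, which is also why live-updating callers elsewhere in this patch move to `cursive_table_view::set_items_stable`. A sketch of that contract, mirroring the module's own test and reusing its `TestPeerStats` helper:

let ps1 = PeerStats {
	addr: "123.0.0.1".to_string(),
	..TestPeerStats::default()
};
let ps2 = PeerStats {
	addr: "123.0.0.2".to_string(),
	..TestPeerStats::default()
};

let mut table = TableView::<PeerStats, PeerColumn>::new();
table.set_items(vec![ps1.clone(), ps2.clone()]);
assert_eq!(table.item(), Some(0)); // ps1 selected by default

table.set_items(vec![ps2, ps1]); // same items, reordered
assert_eq!(table.item(), Some(1)); // selection follows ps1 to its new index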
- pub fn borrow_items(&mut self) -> &[T] { - &self.items - } - - /// Returns a mutable reference to the items contained within the table. - /// - /// Can be used to modify the items in place. - pub fn borrow_items_mut(&mut self) -> &mut [T] { - &mut self.items - } - - /// Returns the index of the currently selected item within the underlying - /// storage vector. - pub fn item(&self) -> Option { - if self.items.is_empty() || self.focus >= self.rows_to_items.len() { - None - } else { - Some(self.rows_to_items[self.focus]) - } - } - - /// Selects the item at the specified index within the underlying storage - /// vector. - pub fn set_selected_item(&mut self, item_index: usize) { - // TODO optimize the performance for very large item lists - if item_index < self.items.len() { - for (row, item) in self.rows_to_items.iter().enumerate() { - if *item == item_index { - self.focus = row; - self.scrollbase.scroll_to(row); - break; - } - } - } - } - - /// Selects the item at the specified index within the underlying storage - /// vector. - /// - /// Chainable variant. - pub fn selected_item(self, item_index: usize) -> Self { - self.with(|t| t.set_selected_item(item_index)) - } - - /// Inserts a new item into the table. - /// - /// The currently active sort order is preserved and will be applied to the - /// newly inserted item. - pub fn insert_item(&mut self, item: T) { - self.items.push(item); - self.rows_to_items.push(self.items.len()); - - self.scrollbase - .set_heights(self.last_size.y.saturating_sub(2), self.rows_to_items.len()); - - if let Some((column, order)) = self.order() { - self.sort_by(column, order); - } - } - - /// Removes the item at the specified index within the underlying storage - /// vector and returns it. - pub fn remove_item(&mut self, item_index: usize) -> Option { - if item_index < self.items.len() { - // Move the selection if the currently selected item gets removed - if let Some(selected_index) = self.item() { - if selected_index == item_index { - self.focus_up(1); - } - } - - // Remove the sorted reference to the item - self.rows_to_items.retain(|i| *i != item_index); - - // Adjust remaining references - for ref_index in &mut self.rows_to_items { - if *ref_index > item_index { - *ref_index -= 1; - } - } - - // Update scroll height to prevent out of index drawing - self.scrollbase - .set_heights(self.last_size.y.saturating_sub(2), self.rows_to_items.len()); - - // Remove actual item from the underlying storage - Some(self.items.remove(item_index)) - } else { - None - } - } - - /// Removes all items from the underlying storage and returns them. 
- pub fn take_items(&mut self) -> Vec { - self.scrollbase - .set_heights(self.last_size.y.saturating_sub(2), 0); - self.set_selected_row(0); - self.rows_to_items.clear(); - self.items.drain(0..).collect() - } -} - -impl + PartialEq, H: Eq + Hash + Copy + Clone + 'static> TableView { - fn draw_columns, &TableColumn)>( - &self, - printer: &Printer<'_, '_>, - sep: &str, - callback: C, - ) { - let mut column_offset = 0; - let column_count = self.columns.len(); - for (index, column) in self.columns.iter().enumerate() { - let printer = &printer.offset((column_offset, 0)).focused(true); - - callback(printer, column); - - if index < column_count - 1 { - printer.print((column.width + 1, 0), sep); - } - - column_offset += column.width + 3; - } - } - - fn sort_items(&mut self, column: H, order: Ordering) { - if !self.is_empty() { - let old_item = self.item(); - - let mut rows_to_items = self.rows_to_items.clone(); - rows_to_items.sort_by(|a, b| { - if order == Ordering::Less { - self.items[*a].cmp(&self.items[*b], column) - } else { - self.items[*b].cmp(&self.items[*a], column) - } - }); - self.rows_to_items = rows_to_items; - if let Some(o) = old_item { - self.set_selected_item(o) - } - } - } - - fn draw_item(&self, printer: &Printer<'_, '_>, i: usize) { - self.draw_columns(printer, "┆ ", |printer, column| { - let value = self.items[self.rows_to_items[i]].to_column(column.column); - column.draw_row(printer, value.as_str()); - }); - } - - fn focus_up(&mut self, n: usize) { - self.focus -= cmp::min(self.focus, n); - } - - fn focus_down(&mut self, n: usize) { - self.focus = cmp::min(self.focus + n, self.items.len() - 1); - } - - fn active_column(&self) -> usize { - self.columns.iter().position(|c| c.selected).unwrap_or(0) - } - - fn column_cancel(&mut self) { - self.column_select = false; - for column in &mut self.columns { - column.selected = column.order != Ordering::Equal; - } - } - - fn column_next(&mut self) -> bool { - let column = self.active_column(); - if column < self.columns.len() - 1 { - self.columns[column].selected = false; - self.columns[column + 1].selected = true; - true - } else { - false - } - } - - fn column_prev(&mut self) -> bool { - let column = self.active_column(); - if column > 0 { - self.columns[column].selected = false; - self.columns[column - 1].selected = true; - true - } else { - false - } - } - - fn column_select(&mut self) { - let next = self.active_column(); - let column = self.columns[next].column; - let current = self - .columns - .iter() - .position(|c| c.order != Ordering::Equal) - .unwrap_or(0); - - let order = if current != next { - self.columns[next].default_order - } else if self.columns[current].order == Ordering::Less { - Ordering::Greater - } else { - Ordering::Less - }; - - self.sort_by(column, order); - } -} - -impl + PartialEq + 'static, H: Eq + Hash + Copy + Clone + 'static> View - for TableView -{ - fn draw(&self, printer: &Printer<'_, '_>) { - self.draw_columns(printer, "╷ ", |printer, column| { - let color = if column.order != Ordering::Equal || column.selected { - if self.column_select && column.selected && self.enabled && printer.focused { - Highlight - } else { - HighlightInactive - } - } else { - Primary - }; - - printer.with_color(ColorStyle::from(color), |printer| { - column.draw_header(printer); - }); - }); - - self.draw_columns( - &printer.offset((0, 1)).focused(true), - "┴─", - |printer, column| { - printer.print_hline((0, 0), column.width + 1, "─"); - }, - ); - - let printer = &printer.offset((0, 2)).focused(true); - 
self.scrollbase.draw(printer, |printer, i| { - let color = if i == self.focus { - if !self.column_select && self.enabled && printer.focused { - Highlight - } else { - HighlightInactive - } - } else { - Primary - }; - - printer.with_color(ColorStyle::from(color), |printer| { - self.draw_item(printer, i); - }); - }); - } - - fn layout(&mut self, size: Vec2) { - if size == self.last_size { - return; - } - - let item_count = self.items.len(); - let column_count = self.columns.len(); - - // Split up all columns into sized / unsized groups - let (mut sized, mut usized): (Vec<&mut TableColumn>, Vec<&mut TableColumn>) = self - .columns - .iter_mut() - .partition(|c| c.requested_width.is_some()); - - // Subtract one for the separators between our columns (that's column_count - 1) - let mut available_width = size.x.saturating_sub(column_count.saturating_sub(1) * 3); - - // Reduce the with in case we are displaying a scrollbar - if size.y.saturating_sub(1) < item_count { - available_width = available_width.saturating_sub(2); - } - - // Calculate widths for all requested columns - let mut remaining_width = available_width; - for column in &mut sized { - column.width = match *column.requested_width.as_ref().unwrap() { - TableColumnWidth::Percent(width) => cmp::min( - (size.x as f32 / 100.0 * width as f32).ceil() as usize, - remaining_width, - ), - TableColumnWidth::Absolute(width) => width, - }; - remaining_width = remaining_width.saturating_sub(column.width); - } - - // Spread the remaining with across the unsized columns - let remaining_columns = usized.len(); - for column in &mut usized { - column.width = (remaining_width as f32 / remaining_columns as f32).floor() as usize; - } - - self.scrollbase - .set_heights(size.y.saturating_sub(2), item_count); - self.last_size = size; - } - - fn take_focus(&mut self, _: Direction) -> bool { - self.enabled && !self.items.is_empty() - } - - fn on_event(&mut self, event: Event) -> EventResult { - if !self.enabled { - return EventResult::Ignored; - } - - let last_focus = self.focus; - match event { - Event::Key(Key::Right) => { - if self.column_select { - if !self.column_next() { - return EventResult::Ignored; - } - } else { - self.column_select = true; - } - } - Event::Key(Key::Left) => { - if self.column_select { - if !self.column_prev() { - return EventResult::Ignored; - } - } else { - self.column_select = true; - } - } - Event::Key(Key::Up) if self.focus > 0 || self.column_select => { - if self.column_select { - self.column_cancel(); - } else { - self.focus_up(1); - } - } - Event::Key(Key::Down) if self.focus + 1 < self.items.len() || self.column_select => { - if self.column_select { - self.column_cancel(); - } else { - self.focus_down(1); - } - } - Event::Key(Key::PageUp) => { - self.column_cancel(); - self.focus_up(10); - } - Event::Key(Key::PageDown) => { - self.column_cancel(); - self.focus_down(10); - } - Event::Key(Key::Home) => { - self.column_cancel(); - self.focus = 0; - } - Event::Key(Key::End) => { - self.column_cancel(); - self.focus = self.items.len() - 1; - } - Event::Key(Key::Enter) => { - if self.column_select { - self.column_select(); - - if self.on_sort.is_some() { - let c = &self.columns[self.active_column()]; - let column = c.column; - let order = c.order; - - let cb = self.on_sort.clone().unwrap(); - return EventResult::Consumed(Some(Callback::from_fn(move |s| { - cb(s, column, order) - }))); - } - } else if !self.is_empty() && self.on_submit.is_some() { - let cb = self.on_submit.clone().unwrap(); - let row = self.row().unwrap(); - let 
index = self.item().unwrap(); - return EventResult::Consumed(Some(Callback::from_fn(move |s| { - cb(s, row, index) - }))); - } - } - _ => return EventResult::Ignored, - } - - let focus = self.focus; - self.scrollbase.scroll_to(focus); - - if self.column_select { - EventResult::Consumed(None) - } else if !self.is_empty() && last_focus != focus { - let row = self.row().unwrap(); - let index = self.item().unwrap(); - EventResult::Consumed( - self.on_select - .clone() - .map(|cb| Callback::from_fn(move |s| cb(s, row, index))), - ) - } else { - EventResult::Ignored - } - } -} - -/// A type used for the construction of columns in a -/// [`TableView`](struct.TableView.html). -pub struct TableColumn { - column: H, - title: String, - selected: bool, - alignment: HAlign, - order: Ordering, - width: usize, - default_order: Ordering, - requested_width: Option, -} - -enum TableColumnWidth { - Percent(usize), - Absolute(usize), -} - -impl TableColumn { - /// Sets the default ordering of the column. - pub fn ordering(mut self, order: Ordering) -> Self { - self.default_order = order; - self - } - - /// Sets the horizontal text alignment of the column. - pub fn align(mut self, alignment: HAlign) -> Self { - self.alignment = alignment; - self - } - - /// Sets how many characters of width this column will try to occupy. - pub fn width(mut self, width: usize) -> Self { - self.requested_width = Some(TableColumnWidth::Absolute(width)); - self - } - - /// Sets what percentage of the width of the entire table this column will - /// try to occupy. - pub fn width_percent(mut self, width: usize) -> Self { - self.requested_width = Some(TableColumnWidth::Percent(width)); - self - } - - fn new(column: H, title: String) -> Self { - Self { - column: column, - title: title, - selected: false, - alignment: HAlign::Left, - order: Ordering::Equal, - width: 0, - default_order: Ordering::Less, - requested_width: None, - } - } - - fn draw_header(&self, printer: &Printer<'_, '_>) { - let order = match self.order { - Ordering::Less => "^", - Ordering::Greater => "v", - Ordering::Equal => " ", - }; - - let header = match self.alignment { - HAlign::Left => format!( - "{: format!( - "{:>width$} [{}]", - self.title, - order, - width = self.width.saturating_sub(4) - ), - HAlign::Center => format!( - "{:^width$} [{}]", - self.title, - order, - width = self.width.saturating_sub(4) - ), - }; - printer.print((0, 0), header.as_str()); - } - - fn draw_row(&self, printer: &Printer<'_, '_>, value: &str) { - let value = match self.alignment { - HAlign::Left => format!("{: format!("{:>width$} ", value, width = self.width), - HAlign::Center => format!("{:^width$} ", value, width = self.width), - }; - printer.print((0, 0), value.as_str()); - } -} - -#[cfg(test)] -mod test { - use crate::tui::peers::PeerColumn; - use crate::tui::table::TableView; - use chrono::Utc; - use grin_core::ser::ProtocolVersion; - use grin_servers::PeerStats; - use std::cmp::Ordering; - - #[test] - pub fn test_set_items_preserves_selected_item() { - let mut table = TableView::::new(); - let ps1 = PeerStats { - addr: "123.0.0.1".to_string(), - ..TestPeerStats::default() - }; - let ps2 = PeerStats { - addr: "123.0.0.2".to_string(), - ..TestPeerStats::default() - }; - - let mut items = vec![ps1, ps2]; - table.set_items(items.clone()); - assert_eq!(table.item().unwrap(), 0); - - items.reverse(); - table.set_items(items); - assert_eq!(table.item().unwrap(), 1); - } - - #[test] - pub fn test_set_items_preserves_order() { - let mut table = TableView::::new(); - let ps1 = 
PeerStats { - addr: "123.0.0.1".to_string(), - received_bytes_per_sec: 10, - ..TestPeerStats::default() - }; - let ps2 = PeerStats { - addr: "123.0.0.2".to_string(), - received_bytes_per_sec: 80, - ..TestPeerStats::default() - }; - - let items = vec![ps1, ps2]; - table.set_items(items); - assert_eq!(table.rows_to_items[0], 0); - table.sort_by(PeerColumn::UsedBandwidth, Ordering::Greater); - - assert_eq!(table.rows_to_items[0], 1); - } - - struct TestPeerStats(PeerStats); - - impl TestPeerStats { - fn default() -> PeerStats { - PeerStats { - state: "Connected".to_string(), - addr: "127.0.0.1".to_string(), - version: ProtocolVersion::local(), - user_agent: "".to_string(), - total_difficulty: 0, - height: 0, - direction: "Outbound".to_string(), - last_seen: Utc::now(), - sent_bytes_per_sec: 0, - received_bytes_per_sec: 0, - } - } - } -} diff --git a/src/bin/tui/types.rs b/src/bin/tui/types.rs index b476e6a965..03a6be2a73 100644 --- a/src/bin/tui/types.rs +++ b/src/bin/tui/types.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/src/bin/tui/ui.rs b/src/bin/tui/ui.rs index b76bce0493..cb78e2e15b 100644 --- a/src/bin/tui/ui.rs +++ b/src/bin/tui/ui.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,14 +23,12 @@ use cursive::theme::PaletteColor::{ Background, Highlight, HighlightInactive, Primary, Shadow, View, }; use cursive::theme::{BaseColor, BorderStyle, Color, Theme}; -use cursive::traits::Boxable; -use cursive::traits::Identifiable; +use cursive::traits::{Nameable, Resizable}; use cursive::utils::markup::StyledString; use cursive::views::{ CircularFocus, Dialog, LinearLayout, Panel, SelectView, StackView, TextView, ViewRef, }; -use cursive::Cursive; -use cursive::CursiveExt; +use cursive::{CursiveRunnable, CursiveRunner}; use std::sync::mpsc; use std::{thread, time}; @@ -44,7 +42,7 @@ use grin_core::global; use grin_util::logger::LogEntry; pub struct UI { - cursive: Cursive, + cursive: CursiveRunner, ui_rx: mpsc::Receiver, ui_tx: mpsc::Sender, controller_tx: mpsc::Sender, @@ -72,7 +70,7 @@ impl UI { let (ui_tx, ui_rx) = mpsc::channel::(); let mut grin_ui = UI { - cursive: Cursive::default(), + cursive: cursive::default().into_runner(), ui_tx, ui_rx, controller_tx, @@ -125,9 +123,7 @@ impl UI { let controller_tx_clone = grin_ui.controller_tx.clone(); grin_ui.cursive.add_global_callback('q', move |c| { let content = StyledString::styled("Shutting down...", Color::Light(BaseColor::Yellow)); - c.add_layer(CircularFocus::wrap_tab(Dialog::around(TextView::new( - content, - )))); + c.add_layer(CircularFocus::new(Dialog::around(TextView::new(content))).wrap_tab()); controller_tx_clone .send(ControllerMessage::Shutdown) .unwrap(); @@ -198,7 +194,7 @@ impl Controller { pub fn run(&mut self, server: Server) { let stat_update_interval = 1; let mut next_stat_update = Utc::now().timestamp() + stat_update_interval; - let delay = time::Duration::from_millis(50); + let delay = time::Duration::from_millis(250); while self.ui.step() { if let Some(message) = self.rx.try_iter().next() { match message { diff --git a/src/bin/tui/version.rs b/src/bin/tui/version.rs index c6b2025f74..61bd0e1434 100644 --- a/src/bin/tui/version.rs +++ 
b/src/bin/tui/version.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ //! Version and build info use cursive::direction::Orientation; -use cursive::traits::Identifiable; +use cursive::traits::Nameable; use cursive::view::View; use cursive::views::{LinearLayout, ResizedView, TextView}; diff --git a/src/build/build.rs b/src/build/build.rs index 44e2674e67..e65905c4bb 100644 --- a/src/build/build.rs +++ b/src/build/build.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,6 @@ //! Build hooks to spit out version+build time info -use built; - use std::env; use std::path::{Path, PathBuf}; use std::process::Command; diff --git a/store/Cargo.toml b/store/Cargo.toml index 7bac99329e..104b436efa 100644 --- a/store/Cargo.toml +++ b/store/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grin_store" -version = "4.4.2" +version = "5.3.2" authors = ["Grin Developers "] description = "Simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format." license = "Apache-2.0" @@ -11,19 +11,18 @@ edition = "2018" [dependencies] byteorder = "1" -croaring = "1.0.1" +croaring = "1.1" libc = "0.2" -failure = "0.1" -failure_derive = "0.1" lmdb-zero = "0.4.4" memmap = "0.7" tempfile = "3.1" serde = "1" serde_derive = "1" +thiserror = "1" log = "0.4" -grin_core = { path = "../core", version = "4.4.2" } -grin_util = { path = "../util", version = "4.4.2" } +grin_core = { path = "../core", version = "5.3.2" } +grin_util = { path = "../util", version = "5.3.2" } [dev-dependencies] chrono = "0.4.11" diff --git a/store/src/leaf_set.rs b/store/src/leaf_set.rs index fa80b2bf10..d0cc25531f 100644 --- a/store/src/leaf_set.rs +++ b/store/src/leaf_set.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -19,9 +19,9 @@ use std::path::{Path, PathBuf}; use croaring::{Bitmap, Portable}; -use crate::core::core::hash::Hashed; -use crate::core::core::pmmr; -use crate::core::core::BlockHeader; +use crate::grin_core::core::hash::Hashed; +use crate::grin_core::core::pmmr; +use crate::grin_core::core::BlockHeader; use crate::prune_list::PruneList; use crate::{read_bitmap, save_via_temp_file}; @@ -96,7 +96,7 @@ impl LeafSet { /// Only applicable for the output MMR. fn unpruned_pre_cutoff(&self, cutoff_pos: u64, prune_list: &PruneList) -> Bitmap { (1..=cutoff_pos) - .filter(|&x| pmmr::is_leaf(x) && !prune_list.is_pruned(x)) + .filter(|&x| pmmr::is_leaf(x - 1) && !prune_list.is_pruned(x - 1)) .map(|x| x as u32) .collect() } @@ -114,7 +114,7 @@ impl LeafSet { // First remove pos from leaf_set that were // added after the point we are rewinding to. 
- let to_remove = ((cutoff_pos + 1) as u32)..bitmap.maximum().unwrap_or(0); + let to_remove = ((cutoff_pos + 1) as u32)..=bitmap.maximum().unwrap_or(0); bitmap.remove_range(to_remove); // Then add back output pos to the leaf_set @@ -133,7 +133,7 @@ impl LeafSet { pub fn rewind(&mut self, cutoff_pos: u64, rewind_rm_pos: &Bitmap) { // First remove pos from leaf_set that were // added after the point we are rewinding to. - let to_remove = ((cutoff_pos + 1) as u32)..self.bitmap.maximum().unwrap_or(0); + let to_remove = ((cutoff_pos + 1) as u32)..=self.bitmap.maximum().unwrap_or(0); self.bitmap.remove_range(to_remove); // Then add back output pos to the leaf_set @@ -142,13 +142,13 @@ impl LeafSet { } /// Append a new position to the leaf_set. - pub fn add(&mut self, pos: u64) { - self.bitmap.add(pos as u32); + pub fn add(&mut self, pos0: u64) { + self.bitmap.add(1 + pos0 as u32); } /// Remove the provided position from the leaf_set. - pub fn remove(&mut self, pos: u64) { - self.bitmap.remove(pos as u32); + pub fn remove(&mut self, pos0: u64) { + self.bitmap.remove(1 + pos0 as u32); } /// Saves the utxo file tagged with block hash as filename suffix. @@ -187,8 +187,8 @@ impl LeafSet { } /// Whether the leaf_set includes the provided position. - pub fn includes(&self, pos: u64) -> bool { - self.bitmap.contains(pos as u32) + pub fn includes(&self, pos0: u64) -> bool { + self.bitmap.contains(1 + pos0 as u32) } /// Number of positions stored in the leaf_set. @@ -196,6 +196,11 @@ impl LeafSet { self.bitmap.cardinality() as usize } + /// Number of positions up to index n in the leaf set + pub fn n_unpruned_leaves_to_index(&self, to_index: u64) -> u64 { + self.bitmap.range_cardinality(0u32..to_index as u32) + } + /// Is the leaf_set empty. pub fn is_empty(&self) -> bool { self.len() == 0 diff --git a/store/src/lib.rs b/store/src/lib.rs index b7a0607fd4..215b43f70f 100644 --- a/store/src/lib.rs +++ b/store/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,11 +22,8 @@ #[macro_use] extern crate log; -use failure; #[macro_use] -extern crate failure_derive; -#[macro_use] -extern crate grin_core as core; +extern crate grin_core; extern crate grin_util as util; //use grin_core as core; diff --git a/store/src/lmdb.rs b/store/src/lmdb.rs index b187c0e944..506dabe1a6 100644 --- a/store/src/lmdb.rs +++ b/store/src/lmdb.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,43 +15,42 @@ //! Storage of core types using LMDB. 
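The two `..=` changes above fix an off-by-one: croaring's `remove_range` takes a standard Rust range, so the exclusive form never cleared `bitmap.maximum()` itself during a rewind. A self-contained sketch (croaring 1.x API):

use croaring::Bitmap;

let mut bitmap = Bitmap::of(&[5, 7, 9]);
let cutoff_pos: u64 = 4;

// Exclusive `5..9` would leave position 9 (the maximum) behind after a rewind;
// inclusive `5..=9` clears everything past the cutoff.
bitmap.remove_range((cutoff_pos + 1) as u32..=bitmap.maximum().unwrap_or(0));
assert!(bitmap.is_empty());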
 use std::fs;
-use std::marker;
 use std::sync::Arc;
 
 use lmdb_zero as lmdb;
 use lmdb_zero::traits::CreateCursor;
 use lmdb_zero::LmdbResultExt;
 
-use crate::core::global;
-use crate::core::ser::{self, ProtocolVersion};
+use crate::grin_core::global;
+use crate::grin_core::ser::{self, DeserializationMode, ProtocolVersion};
 use crate::util::RwLock;
 
 /// number of bytes to grow the database by when needed
 pub const ALLOC_CHUNK_SIZE_DEFAULT: usize = 134_217_728; //128 MB
 /// And for test mode, to avoid too much disk allocation on windows
 pub const ALLOC_CHUNK_SIZE_DEFAULT_TEST: usize = 1_048_576; //1 MB
-const RESIZE_PERCENT: f32 = 0.5;
+const RESIZE_PERCENT: f32 = 0.9;
 /// Want to ensure that each resize gives us at least this %
 /// of total space free
-const RESIZE_MIN_TARGET_PERCENT: f32 = 0.25;
+const RESIZE_MIN_TARGET_PERCENT: f32 = 0.65;
 
 /// Main error type for this lmdb
-#[derive(Clone, Eq, PartialEq, Debug, Fail)]
+#[derive(Clone, Eq, PartialEq, Debug, thiserror::Error)]
 pub enum Error {
 	/// Couldn't find what we were looking for
-	#[fail(display = "DB Not Found Error: {}", _0)]
+	#[error("DB Not Found Error: {0}")]
 	NotFoundErr(String),
 	/// Wraps an error originating from LMDB
-	#[fail(display = "LMDB error, {} ", _0)]
+	#[error("LMDB error, {0}")]
 	LmdbErr(lmdb::error::Error),
 	/// Wraps a serialization error for Writeable or Readable
-	#[fail(display = "LMDB Serialization Error, {}", _0)]
-	SerErr(String),
+	#[error("LMDB Serialization Error, {0}")]
+	SerErr(ser::Error),
 	/// File handling error
-	#[fail(display = "File handling Error")]
+	#[error("File handling Error: {0}")]
 	FileErr(String),
 	/// Other error
-	#[fail(display = "Other Error")]
+	#[error("Other Error: {0}")]
 	OtherErr(String),
 }
@@ -61,6 +60,12 @@ impl From<lmdb::error::Error> for Error {
 	}
 }
 
+impl From<ser::Error> for Error {
+	fn from(e: ser::Error) -> Error {
+		Error::SerErr(e)
+	}
+}
+
 /// unwraps the inner option by converting the none case to a not found error
 pub fn option_to_not_found<T, F>(res: Result<Option<T>, Error>, field_name: F) -> Result<T, Error>
 where
@@ -245,82 +250,72 @@ impl Store {
 		Ok(())
 	}
 
-	/// Gets a value from the db, provided its key
-	pub fn get_with<T, F>(&self, key: &[u8], f: F) -> Result<Option<T>, Error>
+	/// Gets a value from the db, provided its key.
+	/// Deserializes the retrieved data using the provided function.
+	pub fn get_with<T, F>(
+		&self,
+		key: &[u8],
+		access: &lmdb::ConstAccessor<'_>,
+		db: &lmdb::Database<'_>,
+		deserialize: F,
+	) -> Result<Option<T>, Error>
 	where
-		F: Fn(&[u8]) -> T,
+		F: Fn(&[u8], &[u8]) -> Result<T, Error>,
 	{
+		let res: Option<&[u8]> = access.get(db, key).to_opt()?;
+		match res {
+			None => Ok(None),
+			Some(res) => deserialize(key, res).map(Some),
+		}
+	}
+
+	/// Gets a `Readable` value from the db, provided its key.
+	/// Note: Creates a new read transaction so will *not* see any uncommitted data.
+	pub fn get_ser<T: ser::Readable>(
+		&self,
+		key: &[u8],
+		deser_mode: Option<DeserializationMode>,
+	) -> Result<Option<T>, Error> {
 		let lock = self.db.read();
 		let db = lock
 			.as_ref()
 			.ok_or_else(|| Error::NotFoundErr("chain db is None".to_string()))?;
 		let txn = lmdb::ReadTransaction::new(self.env.clone())?;
 		let access = txn.access();
-		let res = access.get(db, key);
-		res.map(f).to_opt().map_err(From::from)
+		let d = match deser_mode {
+			Some(d) => d,
+			_ => DeserializationMode::default(),
+		};
+		self.get_with(key, &access, &db, |_, mut data| {
+			ser::deserialize(&mut data, self.protocol_version(), d).map_err(From::from)
+		})
 	}
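A usage sketch of the refactored accessors above, assuming a hypothetical open `store: Store` with its `env` and database handle `db`; `BlockHeader` stands in for any `ser::Readable` type, and the key is illustrative:

// Default deserialization mode; returns Ok(None) if the key is absent.
let header: Option<BlockHeader> = store.get_ser(b"H:head", None)?;

// The lower-level get_with now threads the accessor and database through
// explicitly and lets the caller deserialize (or just inspect) the raw bytes.
let txn = lmdb::ReadTransaction::new(env.clone())?;
let access = txn.access();
let raw_len: Option<usize> = store.get_with(b"H:head", &access, &db, |_key, bytes| Ok(bytes.len()))?;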
-	/// Gets a `Readable` value from the db, provided its key. Encapsulates
-	/// serialization.
-	pub fn get_ser<T: ser::Readable>(&self, key: &[u8]) -> Result<Option<T>, Error> {
+	/// Whether the provided key exists
+	pub fn exists(&self, key: &[u8]) -> Result<bool, Error> {
 		let lock = self.db.read();
 		let db = lock
 			.as_ref()
 			.ok_or_else(|| Error::NotFoundErr("chain db is None".to_string()))?;
 		let txn = lmdb::ReadTransaction::new(self.env.clone())?;
 		let access = txn.access();
-		self.get_ser_access(key, &access, db.clone())
-	}
-
-	fn get_ser_access<T: ser::Readable>(
-		&self,
-		key: &[u8],
-		access: &lmdb::ConstAccessor<'_>,
-		db: Arc<lmdb::Database<'static>>,
-	) -> Result<Option<T>, Error> {
-		let res: lmdb::error::Result<&[u8]> = access.get(&db, key);
-		match res.to_opt() {
-			Ok(Some(mut res)) => match ser::deserialize(&mut res, self.protocol_version()) {
-				Ok(res) => Ok(Some(res)),
-				Err(e) => Err(Error::SerErr(format!("{}", e))),
-			},
-			Ok(None) => Ok(None),
-			Err(e) => Err(From::from(e)),
-		}
+		let res: Option<&lmdb::Ignore> = access.get(db, key).to_opt()?;
+		Ok(res.is_some())
 	}
 
-	/// Whether the provided key exists
-	pub fn exists(&self, key: &[u8]) -> Result<bool, Error> {
+	/// Produces an iterator from the provided key prefix.
+	pub fn iter<T, F>(&self, prefix: &[u8], deserialize: F) -> Result<PrefixIterator<F, T>, Error>
+	where
+		F: Fn(&[u8], &[u8]) -> Result<T, Error>,
+	{
 		let lock = self.db.read();
 		let db = lock
 			.as_ref()
 			.ok_or_else(|| Error::NotFoundErr("chain db is None".to_string()))?;
-		let txn = lmdb::ReadTransaction::new(self.env.clone())?;
-		let access = txn.access();
-		let res: lmdb::error::Result<&lmdb::Ignore> = access.get(db, key);
-		res.to_opt().map(|r| r.is_some()).map_err(From::from)
-	}
-
-	/// Produces an iterator of (key, value) pairs, where values are `Readable` types
-	/// moving forward from the provided key.
-	pub fn iter<T: ser::Readable>(&self, from: &[u8]) -> Result<SerIterator<T>, Error> {
-		let db = self.db.read();
-		let cloned_db = db.as_ref();
-		let cloned_db = if cloned_db.is_some() {
-			cloned_db.unwrap().clone()
-		} else {
-			return Err(Error::NotFoundErr("error cloning db".to_string()));
-		};
 		let tx = Arc::new(lmdb::ReadTransaction::new(self.env.clone())?);
-		let cursor = Arc::new(tx.cursor(cloned_db)?);
-		Ok(SerIterator {
-			tx,
-			cursor,
-			seek: false,
-			prefix: from.to_vec(),
-			version: self.protocol_version(),
-			_marker: marker::PhantomData,
-		})
+		let cursor = Arc::new(tx.cursor(db.clone())?);
+		Ok(PrefixIterator::new(tx, cursor, prefix, deserialize))
 	}
 
 	/// Builds a new batch to be used with this store.
@@ -375,40 +370,61 @@ impl<'a> Batch<'a> {
 		let ser_value = ser::ser_vec(value, version);
 		match ser_value {
 			Ok(data) => self.put(key, &data),
-			Err(err) => Err(Error::SerErr(format!("{}", err))),
+			Err(err) => Err(err.into()),
 		}
 	}
 
-	/// gets a value from the db, provided its key
-	pub fn get_with<T, F>(&self, key: &[u8], f: F) -> Result<Option<T>, Error>
+	/// Low-level access for retrieving data by key.
+	/// Takes a function for flexible deserialization.
+	pub fn get_with<T, F>(&self, key: &[u8], deserialize: F) -> Result<Option<T>, Error>
 	where
-		F: Fn(&[u8]) -> T,
+		F: Fn(&[u8], &[u8]) -> Result<T, Error>,
 	{
-		self.store.get_with(key, f)
-	}
-
-	/// Whether the provided key exists
-	pub fn exists(&self, key: &[u8]) -> Result<bool, Error> {
-		self.store.exists(key)
-	}
+		let access = self.tx.access();
+		let lock = self.store.db.read();
+		let db = lock
+			.as_ref()
+			.ok_or_else(|| Error::NotFoundErr("chain db is None".to_string()))?;
 
-	/// Produces an iterator of `Readable` types moving forward from the
-	/// provided key.
-	pub fn iter<T: ser::Readable>(&self, from: &[u8]) -> Result<SerIterator<T>, Error> {
-		self.store.iter(from)
+		self.store.get_with(key, &access, &db, deserialize)
 	}
 
-	/// Gets a `Readable` value from the db, provided its key, taking the
-	/// content of the current batch into account.
-	pub fn get_ser<T: ser::Readable>(&self, key: &[u8]) -> Result<Option<T>, Error> {
+	/// Whether the provided key exists.
+	/// This is in the context of the current write transaction.
+	pub fn exists(&self, key: &[u8]) -> Result<bool, Error> {
 		let access = self.tx.access();
-
 		let lock = self.store.db.read();
 		let db = lock
 			.as_ref()
 			.ok_or_else(|| Error::NotFoundErr("chain db is None".to_string()))?;
+		let res: Option<&lmdb::Ignore> = access.get(db, key).to_opt()?;
+		Ok(res.is_some())
+	}
 
-		self.store.get_ser_access(key, &access, db.clone())
+	/// Produces an iterator from the provided key prefix.
+	pub fn iter<T, F>(&self, prefix: &[u8], deserialize: F) -> Result<PrefixIterator<F, T>, Error>
+	where
+		F: Fn(&[u8], &[u8]) -> Result<T, Error>,
+	{
+		self.store.iter(prefix, deserialize)
+	}
+
+	/// Gets a `Readable` value from the db by provided key and provided deserialization strategy.
+	pub fn get_ser<T: ser::Readable>(
+		&self,
+		key: &[u8],
+		deser_mode: Option<DeserializationMode>,
+	) -> Result<Option<T>, Error> {
+		let d = match deser_mode {
+			Some(d) => d,
+			_ => DeserializationMode::default(),
+		};
+		self.get_with(key, |_, mut data| {
+			match ser::deserialize(&mut data, self.protocol_version(), d) {
+				Ok(res) => Ok(res),
+				Err(e) => Err(From::from(e)),
+			}
+		})
 	}
 
 	/// Deletes a key/value pair from the db
@@ -437,57 +453,61 @@ impl<'a> Batch<'a> {
 	}
 }
 
-/// An iterator that produces Readable instances back. Wraps the lower level
-/// DBIterator and deserializes the returned values.
-pub struct SerIterator<T>
+/// An iterator based on key prefix.
+/// Caller is responsible for deserialization of the data.
+pub struct PrefixIterator<F, T>
 where
-	T: ser::Readable,
+	F: Fn(&[u8], &[u8]) -> Result<T, Error>,
 {
 	tx: Arc<lmdb::ReadTransaction<'static>>,
 	cursor: Arc<lmdb::Cursor<'static, 'static>>,
 	seek: bool,
 	prefix: Vec<u8>,
-	version: ProtocolVersion,
-	_marker: marker::PhantomData<T>,
+	deserialize: F,
 }
 
-impl<T> Iterator for SerIterator<T>
+impl<F, T> Iterator for PrefixIterator<F, T>
 where
-	T: ser::Readable,
+	F: Fn(&[u8], &[u8]) -> Result<T, Error>,
 {
-	type Item = (Vec<u8>, T);
+	type Item = T;
 
-	fn next(&mut self) -> Option<(Vec<u8>, T)> {
+	fn next(&mut self) -> Option<Self::Item> {
 		let access = self.tx.access();
-		let kv = if self.seek {
-			Arc::get_mut(&mut self.cursor).unwrap().next(&access)
+		let cursor = Arc::get_mut(&mut self.cursor).expect("failed to get cursor");
+		let kv: Result<(&[u8], &[u8]), _> = if self.seek {
+			cursor.next(&access)
 		} else {
 			self.seek = true;
-			Arc::get_mut(&mut self.cursor)
-				.unwrap()
-				.seek_range_k(&access, &self.prefix[..])
+			cursor.seek_range_k(&access, &self.prefix[..])
 		};
-		match kv {
-			Ok((k, v)) => self.deser_if_prefix_match(k, v),
-			Err(_) => None,
-		}
+		kv.ok()
+			.filter(|(k, _)| k.starts_with(self.prefix.as_slice()))
+			.map(|(k, v)| match (self.deserialize)(k, v) {
+				Ok(v) => Some(v),
+				Err(_) => None,
			})
+			.flatten()
 	}
 }
 
-impl<T> SerIterator<T>
+impl<F, T> PrefixIterator<F, T>
 where
-	T: ser::Readable,
+	F: Fn(&[u8], &[u8]) -> Result<T, Error>,
 {
-	fn deser_if_prefix_match(&self, key: &[u8], value: &[u8]) -> Option<(Vec<u8>, T)> {
-		let plen = self.prefix.len();
-		if plen == 0 || (key.len() >= plen && key[0..plen] == self.prefix[..]) {
-			if let Ok(value) = ser::deserialize(&mut &value[..], self.version) {
-				Some((key.to_vec(), value))
-			} else {
-				None
-			}
-		} else {
-			None
-		}
-	}
+	/// Initialize a new prefix iterator.
+ pub fn new( + tx: Arc>, + cursor: Arc>, + prefix: &[u8], + deserialize: F, + ) -> PrefixIterator { + PrefixIterator { + tx, + cursor, + seek: false, + prefix: prefix.to_vec(), + deserialize, } } } diff --git a/store/src/pmmr.rs b/store/src/pmmr.rs index 9acdf7d07d..8cc4e3ac8e 100644 --- a/store/src/pmmr.rs +++ b/store/src/pmmr.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -16,10 +16,10 @@ use std::fs; use std::{io, time}; -use crate::core::core::hash::{Hash, Hashed}; -use crate::core::core::pmmr::{self, family, Backend}; -use crate::core::core::BlockHeader; -use crate::core::ser::{PMMRable, ProtocolVersion}; +use crate::grin_core::core::hash::{Hash, Hashed}; +use crate::grin_core::core::pmmr::{self, family, Backend}; +use crate::grin_core::core::BlockHeader; +use crate::grin_core::ser::{PMMRable, ProtocolVersion}; use crate::leaf_set::LeafSet; use crate::prune_list::PruneList; use crate::types::{AppendOnlyFile, DataFile, SizeEntry, SizeInfo}; @@ -65,69 +65,100 @@ pub struct PMMRBackend { impl Backend for PMMRBackend { /// Append the provided data and hashes to the backend storage. /// Add the new leaf pos to our leaf_set if this is a prunable MMR. - #[allow(unused_variables)] - fn append(&mut self, data: &T, hashes: Vec) -> Result<(), String> { + fn append(&mut self, data: &T, hashes: &[Hash]) -> Result<(), String> { let size = self .data_file .append(&data.as_elmt()) .map_err(|e| format!("Failed to append data to file. {}", e))?; - for h in &hashes { - self.hash_file - .append(h) - .map_err(|e| format!("Failed to append hash to file. {}", e))?; - } + self.hash_file + .extend_from_slice(hashes) + .map_err(|e| format!("Failed to append hash to file. {}", e))?; if self.prunable { // (Re)calculate the latest pos given updated size of data file // and the total leaf_shift, and add to our leaf_set. - let pos = pmmr::insertion_to_pmmr_index(size + self.prune_list.get_total_leaf_shift()); + let pos = + pmmr::insertion_to_pmmr_index(size + self.prune_list.get_total_leaf_shift() - 1); self.leaf_set.add(pos); } Ok(()) } - fn get_from_file(&self, position: u64) -> Option { - if self.is_compacted(position) { + // Supports appending a pruned subtree (single root hash) to an existing hash file. + // Update the prune_list "shift cache" to reflect the new pruned leaf pos in the subtree. + fn append_pruned_subtree(&mut self, hash: Hash, pos0: u64) -> Result<(), String> { + if !self.prunable { + return Err("Not prunable, cannot append pruned subtree.".into()); + } + + self.hash_file + .append(&hash) + .map_err(|e| format!("Failed to append subtree hash to file. {}", e))?; + + self.prune_list.append(pos0); + + Ok(()) + } + + fn append_hash(&mut self, hash: Hash) -> Result<(), String> { + self.hash_file + .append(&hash) + .map_err(|e| format!("Failed to append hash to file. 
{}", e))?; + Ok(()) + } + + fn get_from_file(&self, pos0: u64) -> Option { + if self.is_compacted(pos0) { return None; } - let shift = self.prune_list.get_shift(position); - self.hash_file.read(position - shift) + let shift = self.prune_list.get_shift(pos0); + self.hash_file.read(1 + pos0 - shift) + } + + fn get_peak_from_file(&self, pos0: u64) -> Option { + let shift = self.prune_list.get_shift(pos0); + self.hash_file.read(1 + pos0 - shift) } - fn get_data_from_file(&self, position: u64) -> Option { - if !pmmr::is_leaf(position) { + fn get_data_from_file(&self, pos0: u64) -> Option { + if !pmmr::is_leaf(pos0) { return None; } - if self.is_compacted(position) { + if self.is_compacted(pos0) { return None; } - let flatfile_pos = pmmr::n_leaves(position); - let shift = self.prune_list.get_leaf_shift(position); + let flatfile_pos = pmmr::n_leaves(pos0 + 1); + let shift = self.prune_list.get_leaf_shift(1 + pos0); self.data_file.read(flatfile_pos - shift) } /// Get the hash at pos. /// Return None if pos is a leaf and it has been removed (or pruned or /// compacted). - fn get_hash(&self, pos: u64) -> Option { - if self.prunable && pmmr::is_leaf(pos) && !self.leaf_set.includes(pos) { + fn get_hash(&self, pos0: u64) -> Option { + if self.prunable && pmmr::is_leaf(pos0) && !self.leaf_set.includes(pos0) { return None; } - self.get_from_file(pos) + self.get_from_file(pos0) } /// Get the data at pos. /// Return None if it has been removed or if pos is not a leaf node. - fn get_data(&self, pos: u64) -> Option { - if !pmmr::is_leaf(pos) { + fn get_data(&self, pos0: u64) -> Option { + if !pmmr::is_leaf(pos0) { return None; } - if self.prunable && !self.leaf_set.includes(pos) { + if self.prunable && !self.leaf_set.includes(pos0) { return None; } - self.get_data_from_file(pos) + self.get_data_from_file(pos0) + } + + /// Remove leaf from leaf set + fn remove_from_leaf_set(&mut self, pos0: u64) { + self.leaf_set.remove(pos0); } /// Returns an iterator over all the leaf positions. @@ -135,7 +166,7 @@ impl Backend for PMMRBackend { /// For a non-prunable PMMR this is *all* leaves (this is not yet implemented). fn leaf_pos_iter(&self) -> Box + '_> { if self.prunable { - Box::new(self.leaf_set.iter()) + Box::new(self.leaf_set.iter().map(|x| x - 1)) } else { panic!("leaf_pos_iter not implemented for non-prunable PMMR") } @@ -149,6 +180,14 @@ impl Backend for PMMRBackend { } } + fn n_unpruned_leaves_to_index(&self, to_index: u64) -> u64 { + if self.prunable { + self.leaf_set.n_unpruned_leaves_to_index(to_index) + } else { + pmmr::n_leaves(pmmr::insertion_to_pmmr_index(to_index)) + } + } + /// Returns an iterator over all the leaf insertion indices (0-indexed). 
/// If our pos are [1,2,4,5,8] (first 5 leaf pos) then our insertion indices are [0,1,2,3,4] fn leaf_idx_iter(&self, from_idx: u64) -> Box + '_> { @@ -157,7 +196,7 @@ impl Backend for PMMRBackend { // iterate, skipping everything prior to this // pass in from_idx=0 then we want to convert to pos=1 - let from_pos = pmmr::insertion_to_pmmr_index(from_idx + 1); + let from_pos = 1 + pmmr::insertion_to_pmmr_index(from_idx); if self.prunable { Box::new( @@ -179,21 +218,37 @@ impl Backend for PMMRBackend { } // Rewind the hash file accounting for pruned/compacted pos - let shift = self.prune_list.get_shift(position); + let shift = if position == 0 { + 0 + } else { + self.prune_list.get_shift(position - 1) + }; self.hash_file.rewind(position - shift); // Rewind the data file accounting for pruned/compacted pos let flatfile_pos = pmmr::n_leaves(position); - let leaf_shift = self.prune_list.get_leaf_shift(position); + let leaf_shift = if position == 0 { + 0 + } else { + self.prune_list.get_leaf_shift(position) + }; self.data_file.rewind(flatfile_pos - leaf_shift); Ok(()) } + fn reset_prune_list(&mut self) { + let bitmap = Bitmap::new(); + self.prune_list = PruneList::new(Some(self.data_dir.join(PMMR_PRUN_FILE)), bitmap); + if let Err(e) = self.prune_list.flush() { + error!("Flushing reset prune list: {}", e); + } + } + /// Remove by insertion position. - fn remove(&mut self, pos: u64) -> Result<(), String> { + fn remove(&mut self, pos0: u64) -> Result<(), String> { assert!(self.prunable, "Remove on non-prunable MMR"); - self.leaf_set.remove(pos); + self.leaf_set.remove(pos0); Ok(()) } @@ -282,16 +337,23 @@ impl PMMRBackend { }) } - fn is_pruned(&self, pos: u64) -> bool { - self.prune_list.is_pruned(pos) + fn is_pruned(&self, pos0: u64) -> bool { + self.prune_list.is_pruned(pos0) } - fn is_pruned_root(&self, pos: u64) -> bool { - self.prune_list.is_pruned_root(pos) + fn is_pruned_root(&self, pos0: u64) -> bool { + self.prune_list.is_pruned_root(pos0) } - fn is_compacted(&self, pos: u64) -> bool { - self.is_pruned(pos) && !self.is_pruned_root(pos) + // Check if pos is pruned but not a pruned root itself. + // Checking for pruned root is faster so we do this check first. + // We can do a fast initial check as well - + // if its in the current leaf_set then we know it is not compacted. + fn is_compacted(&self, pos0: u64) -> bool { + if self.leaf_set.includes(pos0) { + return false; + } + !self.is_pruned_root(pos0) && self.is_pruned(pos0) } /// Number of hashes in the PMMR stored by this backend. Only produces the @@ -319,10 +381,11 @@ impl PMMRBackend { .and(self.hash_file.flush()) .and(self.data_file.flush()) .and(self.sync_leaf_set()) + .and(self.prune_list.flush()) .map_err(|e| { io::Error::new( io::ErrorKind::Interrupted, - format!("Could not sync pmmr to disk, {:?}", e), + format!("Could not sync pmmr to disk: {:?}", e), ) }) } @@ -358,22 +421,22 @@ impl PMMRBackend { // on the cutoff_pos provided. let (leaves_removed, pos_to_rm) = self.pos_to_rm(cutoff_pos, rewind_rm_pos); - // 1. Save compact copy of the hash file, skipping removed data. + // Save compact copy of the hash file, skipping removed data. { - let pos_to_rm = map_vec!(pos_to_rm, |pos| { - let shift = self.prune_list.get_shift(pos.into()); - pos as u64 - shift + let pos_to_rm = map_vec!(pos_to_rm, |pos1| { + let shift = self.prune_list.get_shift(pos1 as u64 - 1); + pos1 as u64 - shift }); - self.hash_file.save_prune(&pos_to_rm)?; + self.hash_file.write_tmp_pruned(&pos_to_rm)?; } - // 2. 
- // 2. Save compact copy of the data file, skipping removed leaves. + // Save compact copy of the data file, skipping removed leaves. { let leaf_pos_to_rm = pos_to_rm .iter() - .filter(|&x| pmmr::is_leaf(x.into())) .map(|x| x as u64) + .filter(|x| pmmr::is_leaf(x - 1)) .collect::<Vec<_>>(); let pos_to_rm = map_vec!(leaf_pos_to_rm, |&pos| { @@ -382,22 +445,30 @@ impl<T: PMMRable> PMMRBackend<T> { flat_pos - shift }); - self.data_file.save_prune(&pos_to_rm)?; + self.data_file.write_tmp_pruned(&pos_to_rm)?; } - // 3. Update the prune list and write to disk. + // Replace hash and data files with compact copies. + // Rebuild and initialize from the new files. { - for pos in leaves_removed.iter() { - self.prune_list.add(pos.into()); - } + debug!("compact: about to replace hash and data files and rebuild..."); + self.hash_file.replace_with_tmp()?; + self.data_file.replace_with_tmp()?; + debug!("compact: ...finished replacing and rebuilding"); + } + + // Update the prune list and write to disk. + { + let mut bitmap = self.prune_list.bitmap(); + bitmap.or_inplace(&leaves_removed); + self.prune_list = PruneList::new(Some(self.data_dir.join(PMMR_PRUN_FILE)), bitmap); self.prune_list.flush()?; } - // 4. Write the leaf_set to disk. + // Write the leaf_set to disk. // Optimize the bitmap storage in the process. self.leaf_set.flush()?; - // 5. cleanup rewind files self.clean_rewind_files()?; Ok(true) @@ -420,19 +491,19 @@ impl<T: PMMRable> PMMRBackend<T> { expanded.add(x); let mut current = x as u64; loop { - let (parent, sibling) = family(current); - let sibling_pruned = self.is_pruned_root(sibling); + let (parent0, sibling0) = family(current - 1); + let sibling_pruned = self.is_pruned_root(sibling0); // if sibling previously pruned // push it back onto list of pos to remove // so we can remove it and traverse up to parent if sibling_pruned { - expanded.add(sibling as u32); + expanded.add(1 + sibling0 as u32); } - if sibling_pruned || expanded.contains(sibling as u32) { - expanded.add(parent as u32); - current = parent; + if sibling_pruned || expanded.contains(1 + sibling0 as u32) { + expanded.add(1 + parent0 as u32); + current = 1 + parent0; } else { break; } @@ -448,8 +519,8 @@ fn removed_excl_roots(removed: &Bitmap) -> Bitmap { removed .iter() .filter(|pos| { - let (parent_pos, _) = family(*pos as u64); - removed.contains(parent_pos as u32) + let (parent_pos0, _) = family(*pos as u64 - 1); + removed.contains(1 + parent_pos0 as u32) }) .collect() } diff --git a/store/src/prune_list.rs b/store/src/prune_list.rs index 555b11552a..2becf85f65 100644 --- a/store/src/prune_list.rs +++ b/store/src/prune_list.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -21,13 +21,18 @@ //! must be shifted the appropriate amount when reading from the hash and data //! files. -use std::io::{self, Write}; use std::path::{Path, PathBuf}; +use std::{ + io::{self, Write}, + ops::Range, +}; use croaring::{Bitmap, Portable}; +use grin_core::core::pmmr; -use crate::core::core::pmmr::{bintree_postorder_height, family, path}; +use crate::grin_core::core::pmmr::{bintree_leftmost, bintree_postorder_height, family}; use crate::{read_bitmap, save_via_temp_file}; +use std::cmp::min; /// Maintains a list of previously pruned nodes in PMMR, compacting the list as /// parents get pruned and allowing checking whether a leaf is pruned.
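// Illustration (fixed 7-node MMR, not grin code) of what removed_excl_roots
// above computes: keep a position in the "removed" set only if its parent is
// also removed, so pruned subtree roots drop out of the set and their hashes
// survive compaction. parent0 hard-codes the example tree: leaves at pos0
// 0,1,3,4; parents 2,5; peak 6.
fn parent0(pos0: u64) -> Option<u64> {
    match pos0 {
        0 | 1 => Some(2),
        3 | 4 => Some(5),
        2 | 5 => Some(6),
        _ => None,
    }
}

fn removed_excl_roots(removed1: &[u32]) -> Vec<u32> {
    removed1
        .iter()
        .copied()
        .filter(|pos1| {
            parent0(*pos1 as u64 - 1)
                .map(|p0| removed1.contains(&(1 + p0 as u32)))
                .unwrap_or(false)
        })
        .collect()
}

fn main() {
    // Leaves at 1-based pos 1 and 2 plus their parent 3 are removed; the
    // parent is the pruned root, so it is excluded and its hash is kept.
    assert_eq!(removed_excl_roots(&[1, 2, 3]), vec![1, 2]);
}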
Given @@ -40,29 +45,33 @@ use crate::{read_bitmap, save_via_temp_file}; /// but positions of a node within the PMMR will not match positions in the /// backend storage anymore. The PruneList accounts for that mismatch and does /// the position translation. +#[derive(Debug)] pub struct PruneList { path: Option<PathBuf>, /// Bitmap representing pruned root node positions. bitmap: Bitmap, - /// Bitmap representing all pruned node positions (everything under the pruned roots). - pruned_cache: Bitmap, shift_cache: Vec<u64>, leaf_shift_cache: Vec<u64>, } impl PruneList { - /// Instantiate a new prune list from the provided path and bitmap. - pub fn new(path: Option<PathBuf>, mut bitmap: Bitmap) -> PruneList { - // Note: prune list is 1-indexed so remove any 0 value for safety. - bitmap.remove(0); - - PruneList { + /// Instantiate a new prune list from the provided path and 1-based bitmap. + /// Note: Does not flush the bitmap to disk. Caller is responsible for doing this. + pub fn new(path: Option<PathBuf>, bitmap: Bitmap) -> PruneList { + assert!(!bitmap.contains(0)); + let mut prune_list = PruneList { path, - bitmap, - pruned_cache: Bitmap::new(), + bitmap: Bitmap::new(), shift_cache: vec![], leaf_shift_cache: vec![], + }; + + for pos1 in bitmap.iter() { + prune_list.append(pos1 as u64 - 1) } + + prune_list.bitmap.run_optimize(); + prune_list } /// Instantiate a new empty prune list. @@ -71,6 +80,7 @@ impl PruneList { } /// Open an existing prune_list or create a new one. + /// Takes an optional bitmap of new pruned pos to be combined with existing pos. pub fn open<P: AsRef<Path>>(path: P) -> io::Result<PruneList> { let file_path = PathBuf::from(path.as_ref()); let bitmap = if file_path.exists() { @@ -78,18 +88,18 @@ impl PruneList { } else { Bitmap::new() }; + assert!(!bitmap.contains(0)); let mut prune_list = PruneList::new(Some(file_path), bitmap); - // Now build the shift and pruned caches from the bitmap we read from disk. + // Now build the shift caches from the bitmap we read from disk prune_list.init_caches(); if !prune_list.bitmap.is_empty() { - debug!("bitmap {} pos ({} bytes), pruned_cache {} pos ({} bytes), shift_cache {}, leaf_shift_cache {}", + debug!( + "bitmap {} pos ({} bytes), shift_cache {}, leaf_shift_cache {}", prune_list.bitmap.cardinality(), prune_list.bitmap.get_serialized_size_in_bytes::<Portable>(), - prune_list.pruned_cache.cardinality(), - prune_list.pruned_cache.get_serialized_size_in_bytes::<Portable>(), prune_list.shift_cache.len(), prune_list.leaf_shift_cache.len(), ); @@ -102,12 +112,9 @@ impl PruneList { pub fn init_caches(&mut self) { self.build_shift_cache(); self.build_leaf_shift_cache(); - self.build_pruned_cache(); } /// Save the prune_list to disk. - /// Clears out leaf pos before saving to disk - /// as we track these via the leaf_set. pub fn flush(&mut self) -> io::Result<()> { // Run the optimization step on the bitmap. self.bitmap.run_optimize(); @@ -119,58 +126,45 @@ impl PruneList { })?; } - // Rebuild our "shift caches" here as we are flushing changes to disk - // and the contents of our prune_list has likely changed. - self.init_caches(); - Ok(()) } /// Return the total shift from all entries in the prune_list. /// This is the shift we need to account for when adding new entries to our PMMR. pub fn get_total_shift(&self) -> u64 { - self.get_shift(self.bitmap.maximum().unwrap_or(0) as u64) + self.get_shift(self.bitmap.maximum().unwrap_or(1) as u64 - 1) } /// Return the total leaf_shift from all entries in the prune_list. /// This is the leaf_shift we need to account for when adding new entries to our PMMR.
pub fn get_total_leaf_shift(&self) -> u64 { - self.get_leaf_shift(self.bitmap.maximum().unwrap_or(0) as u64) + self.get_leaf_shift(self.bitmap.maximum().unwrap_or(1) as u64 - 1) } /// Computes by how many positions a node at pos should be shifted given the /// number of nodes that have already been pruned before it. /// Note: the node at pos may be pruned and may be compacted away itself and /// the caller needs to be aware of this. - pub fn get_shift(&self, pos: u64) -> u64 { - if self.bitmap.is_empty() { - return 0; - } - - let idx = self.bitmap.rank(pos as u32); + pub fn get_shift(&self, pos0: u64) -> u64 { + let idx = self.bitmap.rank(1 + pos0 as u32); if idx == 0 { return 0; } - - if idx > self.shift_cache.len() as u64 { - self.shift_cache[self.shift_cache.len().saturating_sub(1)] - } else { - self.shift_cache[(idx as usize).saturating_sub(1)] - } + self.shift_cache[min(idx as usize, self.shift_cache.len()) - 1] } fn build_shift_cache(&mut self) { - if self.bitmap.is_empty() { - return; - } - self.shift_cache.clear(); - for pos in self.bitmap.iter().filter(|x| *x > 0) { - let pos = pos as u64; - let prev_shift = self.get_shift(pos.saturating_sub(1)); + for pos1 in self.bitmap.iter() { + let pos0 = pos1 as u64 - 1; + let prev_shift = if pos0 == 0 { + 0 + } else { + self.get_shift(pos0 - 1) + }; - let curr_shift = if self.is_pruned_root(pos) { - let height = bintree_postorder_height(pos); + let curr_shift = if self.is_pruned_root(pos0) { + let height = bintree_postorder_height(pos0); 2 * ((1 << height) - 1) } else { 0 @@ -180,39 +174,45 @@ impl PruneList { } } + // Calculate the next shift based on provided pos and the previous shift. + fn calculate_next_shift(&self, pos0: u64) -> u64 { + let prev_shift = if pos0 == 0 { + 0 + } else { + self.get_shift(pos0 - 1) + }; + let shift = if self.is_pruned_root(pos0) { + let height = bintree_postorder_height(pos0); + 2 * ((1 << height) - 1) + } else { + 0 + }; + prev_shift + shift + } + /// As above, but only returning the number of leaf nodes to skip for a /// given leaf. Helpful if, for instance, data for each leaf is being stored /// separately in a continuous flat-file. - pub fn get_leaf_shift(&self, pos: u64) -> u64 { - if self.bitmap.is_empty() { - return 0; - } - - let idx = self.bitmap.rank(pos as u32); + pub fn get_leaf_shift(&self, pos0: u64) -> u64 { + let idx = self.bitmap.rank(1 + pos0 as u32); if idx == 0 { return 0; } - - if idx > self.leaf_shift_cache.len() as u64 { - self.leaf_shift_cache[self.leaf_shift_cache.len().saturating_sub(1)] - } else { - self.leaf_shift_cache[(idx as usize).saturating_sub(1)] - } + self.leaf_shift_cache[min(idx as usize, self.leaf_shift_cache.len()) - 1] } fn build_leaf_shift_cache(&mut self) { - if self.bitmap.is_empty() { - return; - } - self.leaf_shift_cache.clear(); + for pos1 in self.bitmap.iter() { + let pos0 = pos1 as u64 - 1; + let prev_shift = if pos0 == 0 { + 0 + } else { + self.get_leaf_shift(pos0 - 1) + }; - for pos in self.bitmap.iter().filter(|x| *x > 0) { - let pos = pos as u64; - let prev_shift = self.get_leaf_shift(pos.saturating_sub(1)); - - let curr_shift = if self.is_pruned_root(pos) { - let height = bintree_postorder_height(pos); + let curr_shift = if self.is_pruned_root(pos0) { + let height = bintree_postorder_height(pos0); if height == 0 { 0 } else { @@ -226,25 +226,88 @@ impl PruneList { } } - /// Push the node at the provided position in the prune list. Compacts the - /// list if pruning the additional node means a parent can get pruned as - /// well. 
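// Worked example of the two shift formulas used by the caches above
// (illustration only): a pruned root of height h compacts away
// 2*((1<<h)-1) hashes from the hash file but only (1<<h) leaves from the
// data file, and a height-0 root (a lone pruned leaf) shifts nothing in
// the data file since individual leaves are tracked via the leaf_set.
fn hash_shift(height: u64) -> u64 {
    2 * ((1u64 << height) - 1)
}

fn leaf_shift(height: u64) -> u64 {
    if height == 0 {
        0
    } else {
        1u64 << height
    }
}

fn main() {
    assert_eq!((hash_shift(0), leaf_shift(0)), (0, 0));
    // Height-1 root: two child hashes and two leaves compacted away.
    assert_eq!((hash_shift(1), leaf_shift(1)), (2, 2));
    // Height-2 root over 4 leaves: six hashes but only four leaves removed.
    assert_eq!((hash_shift(2), leaf_shift(2)), (6, 4));
}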
- pub fn add(&mut self, pos: u64) { - assert!(pos > 0, "prune list 1-indexed, 0 not valid pos"); - - let mut current = pos; - loop { - let (parent, sibling) = family(current); - - if self.bitmap.contains(sibling as u32) || self.pruned_cache.contains(sibling as u32) { - self.pruned_cache.add(current as u32); - self.bitmap.remove(sibling as u32); - current = parent; + // Calculate the next leaf shift based on provided pos and the previous leaf shift. + fn calculate_next_leaf_shift(&self, pos0: u64) -> u64 { + let prev_shift = if pos0 == 0 { + 0 + } else { + self.get_leaf_shift(pos0 - 1) + }; + let shift = if self.is_pruned_root(pos0) { + let height = bintree_postorder_height(pos0); + if height == 0 { + 0 } else { - self.pruned_cache.add(current as u32); - self.bitmap.add(current as u32); - break; + 1 << height } + } else { + 0 + }; + prev_shift + shift + } + + // Remove any existing entries in shift_cache and leaf_shift_cache + // for any pos contained in the subtree with provided root. + fn cleanup_subtree(&mut self, pos0: u64) { + let lc0 = bintree_leftmost(pos0) as u32; + let size = self.bitmap.maximum().unwrap_or(0); + + // If this subtree does not intersect with existing bitmap then nothing to clean up. + if lc0 >= size { + return; + } + + // Note: We will treat this as a "closed range" below (croaring api weirdness). + // Note: After croaring upgrade to 1.0.2 we provide an inclusive range directly + let cleanup_pos1 = (lc0 + 1)..=size; + + // Find point where we can truncate based on bitmap "rank" (index) of pos to the left of subtree. + let idx = self.bitmap.rank(lc0); + self.shift_cache.truncate(idx as usize); + self.leaf_shift_cache.truncate(idx as usize); + + self.bitmap.remove_range(cleanup_pos1) + } + + /// Push the node at the provided position in the prune list. + /// Assumes rollup of siblings and children has already been handled. + fn append_single(&mut self, pos0: u64) { + assert!( + pos0 >= self.bitmap.maximum().unwrap_or(0) as u64, + "prune list append only" + ); + + // Add this pos to the bitmap (leaf or subtree root) + self.bitmap.add(1 + pos0 as u32); + + // Calculate shift and leaf_shift for this pos. + self.shift_cache.push(self.calculate_next_shift(pos0)); + self.leaf_shift_cache + .push(self.calculate_next_leaf_shift(pos0)); + } + + /// Push the node at the provided position in the prune list. + /// Handles rollup of siblings and children as we go (relatively slow). + /// Once we find a subtree root that cannot be rolled up any further + /// we clean up everything beneath it and replace it with a single appended node. + pub fn append(&mut self, pos0: u64) { + let max = self.bitmap.maximum().unwrap_or(0) as u64; + assert!( + pos0 >= max, + "prune list append only - pos={} bitmap.maximum={}", + pos0, + max + ); + + let (parent0, sibling0) = family(pos0); + if self.is_pruned(sibling0) { + // Recursively append the parent (removing our sibling in the process). + self.append(parent0) + } else { + // Make sure we roll anything beneath this up into this higher level pruned subtree root. + // We should have no nested entries in the prune_list. + self.cleanup_subtree(pos0); + self.append_single(pos0); } } @@ -258,36 +321,113 @@ impl PruneList { self.bitmap.is_empty() }
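// Toy simulation (hard-coded topology, not grin code) of the recursive
// rollup that append performs above: appending a pos whose sibling is
// already a pruned root removes the sibling and appends the parent
// instead. Same 7-node example tree (0-based): leaves 0,1,3,4; parents
// 2,5; peak 6.
use std::collections::BTreeSet;

fn parent_and_sibling(pos0: u64) -> Option<(u64, u64)> {
    match pos0 {
        0 => Some((2, 1)),
        1 => Some((2, 0)),
        3 => Some((5, 4)),
        4 => Some((5, 3)),
        2 => Some((6, 5)),
        5 => Some((6, 2)),
        _ => None,
    }
}

fn append(roots: &mut BTreeSet<u64>, pos0: u64) {
    if let Some((parent, sibling)) = parent_and_sibling(pos0) {
        if roots.contains(&sibling) {
            roots.remove(&sibling);
            append(roots, parent);
            return;
        }
    }
    roots.insert(pos0);
}

fn main() {
    let mut roots = BTreeSet::new();
    append(&mut roots, 0);
    append(&mut roots, 1); // rolls 0 and 1 up into their parent 2
    append(&mut roots, 3);
    append(&mut roots, 4); // rolls 3,4 into 5, then 2,5 into the peak 6
    assert_eq!(roots.iter().copied().collect::<Vec<_>>(), vec![6]);
}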
+ /// A pos is pruned if it is a pruned root directly or if it is + /// beneath the "next" pruned subtree. + /// We only need to consider the "next" subtree due to the append-only MMR structure. + pub fn is_pruned(&self, pos0: u64) -> bool { + if self.is_pruned_root(pos0) { + return true; + } + let rank = self.bitmap.rank(1 + pos0 as u32); + if let Some(root) = self.bitmap.select(rank as u32) { + let range = pmmr::bintree_range(root as u64 - 1); + range.contains(&pos0) + } else { + false + } + } + /// Convert the prune_list to a vec of pos. pub fn to_vec(&self) -> Vec<u64> { self.bitmap.iter().map(|x| x as u64).collect() } - /// Is the pos pruned? - /// Assumes the pruned_cache is fully built and up to date. - pub fn is_pruned(&self, pos: u64) -> bool { - assert!(pos > 0, "prune list 1-indexed, 0 not valid pos"); - self.pruned_cache.contains(pos as u32) + /// Internal shift cache as slice. + /// only used in store/tests/prune_list.rs tests + pub fn shift_cache(&self) -> &[u64] { + self.shift_cache.as_slice() } - fn build_pruned_cache(&mut self) { - if self.bitmap.is_empty() { - return; - } - let maximum = self.bitmap.maximum().unwrap_or(0); - self.pruned_cache = Bitmap::with_container_capacity(maximum); - for pos in 1..(maximum + 1) { - let pruned = path(pos as u64, maximum as u64).any(|x| self.bitmap.contains(x as u32)); - if pruned { - self.pruned_cache.add(pos as u32) - } - } - self.pruned_cache.run_optimize(); + /// Internal leaf shift cache as slice. + /// only used in store/tests/prune_list.rs tests + pub fn leaf_shift_cache(&self) -> &[u64] { + self.leaf_shift_cache.as_slice() } /// Is the specified position a root of a pruned subtree? - pub fn is_pruned_root(&self, pos: u64) -> bool { - assert!(pos > 0, "prune list 1-indexed, 0 not valid pos"); - self.bitmap.contains(pos as u32) + pub fn is_pruned_root(&self, pos0: u64) -> bool { + self.bitmap.contains(1 + pos0 as u32) + } + + /// Iterator over the entries in the prune list (pruned roots). + pub fn iter(&self) -> impl Iterator<Item = u64> + '_ { + self.bitmap.iter().map(|x| x as u64) + } + + /// Iterator over the pruned "bintree range" for each pruned root. + pub fn pruned_bintree_range_iter(&self) -> impl Iterator<Item = Range<u64>> + '_ { + self.iter().map(|x| { + let rng = pmmr::bintree_range(x - 1); + (1 + rng.start)..(1 + rng.end) + }) + } + + /// Iterator over all pos that are *not* pruned based on current prune_list. + pub fn unpruned_iter(&self, cutoff_pos: u64) -> impl Iterator<Item = u64> + '_ { + UnprunedIterator::new(self.pruned_bintree_range_iter()) + .take_while(move |x| *x <= cutoff_pos) + } + + /// Iterator over all leaf pos that are *not* pruned based on current prune_list. + /// Note this is not necessarily the same as the "leaf_set" as an output + /// can be spent but not yet pruned. + pub fn unpruned_leaf_iter(&self, cutoff_pos: u64) -> impl Iterator<Item = u64> + '_ { + self.unpruned_iter(cutoff_pos) + .filter(|x| pmmr::is_leaf(*x - 1)) + } +
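// Standalone re-creation of the skip logic behind unpruned_iter above (the
// UnprunedIterator defined just below): walk 1-based positions up to a
// cutoff, jumping over each excluded pruned-subtree range as it is reached.
use std::ops::Range;

fn unpruned(excl: Vec<Range<u64>>, cutoff: u64) -> Vec<u64> {
    let mut out = Vec::new();
    let mut it = excl.into_iter();
    let mut cur = it.next();
    let mut pos = 1u64;
    while pos <= cutoff {
        match &cur {
            Some(r) if pos >= r.start => {
                // Skip the whole excluded range, then continue from its end.
                pos = r.end;
                cur = it.next();
            }
            _ => {
                out.push(pos);
                pos += 1;
            }
        }
    }
    out
}

fn main() {
    // Pruned roots at 1-based pos 2 and 6 exclude ranges 2..3 and 4..7
    // (mirrors test_unpruned_iter further down in this diff).
    assert_eq!(unpruned(vec![2..3, 4..7], 9), vec![1, 3, 7, 8, 9]);
}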
+ /// Return a clone of our internal bitmap. + pub fn bitmap(&self) -> Bitmap { + self.bitmap.clone() + } +} + +struct UnprunedIterator<I> { + inner: I, + current_excl_range: Option<Range<u64>>, + current_pos: u64, +} + +impl<I: Iterator<Item = Range<u64>>> UnprunedIterator<I> { + fn new(mut inner: I) -> UnprunedIterator<I> { + let current_excl_range = inner.next(); + UnprunedIterator { + inner, + current_excl_range, + current_pos: 1, + } + } +} + +impl<I: Iterator<Item = Range<u64>>> Iterator for UnprunedIterator<I> { + type Item = u64; + + fn next(&mut self) -> Option<Self::Item> { + if let Some(range) = &self.current_excl_range { + if self.current_pos < range.start { + let next = self.current_pos; + self.current_pos += 1; + Some(next) + } else { + // skip the entire excluded range, moving to next excluded range as necessary + self.current_pos = range.end; + self.current_excl_range = self.inner.next(); + self.next() + } + } else { + let next = self.current_pos; + self.current_pos += 1; + Some(next) + } } } diff --git a/store/src/types.rs b/store/src/types.rs index db7709d897..8a2b88893c 100644 --- a/store/src/types.rs +++ b/store/src/types.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -12,11 +12,11 @@ // limitations under the License. //! Common storage-related types -use memmap; use tempfile::tempfile; -use crate::core::ser::{ - self, BinWriter, ProtocolVersion, Readable, Reader, StreamingReader, Writeable, Writer, +use crate::grin_core::ser::{ + self, BinWriter, DeserializationMode, ProtocolVersion, Readable, Reader, StreamingReader, + Writeable, Writer, }; use std::fmt::Debug; use std::fs::{self, File, OpenOptions}; @@ -99,6 +99,14 @@ where Ok(self.size_unsync()) } + /// Append a slice of multiple elements to the file. + /// Will not be written to disk until flush() is subsequently called. + /// Alternatively discard() may be called to discard any pending changes. + pub fn extend_from_slice(&mut self, data: &[T]) -> io::Result<u64> { + self.file.append_elmts(data)?; + Ok(self.size_unsync()) + } + /// Read an element from the file by position. /// Assumes we have already "shifted" the position to account for pruned data. /// Note: PMMR API is 1-indexed, but backend storage is 0-indexed. @@ -146,10 +154,16 @@ where } /// Write the file out to disk, pruning removed elements. - pub fn save_prune(&mut self, prune_pos: &[u64]) -> io::Result<()> { + pub fn write_tmp_pruned(&self, prune_pos: &[u64]) -> io::Result<()> { // Need to convert from 1-index to 0-index (don't ask). let prune_idx: Vec<_> = prune_pos.iter().map(|x| x - 1).collect(); - self.file.save_prune(prune_idx.as_slice()) + self.file.write_tmp_pruned(prune_idx.as_slice()) + } + + /// Replace with file at tmp path. + /// Rebuild and initialize from new file. + pub fn replace_with_tmp(&mut self) -> io::Result<()> { + self.file.replace_with_tmp() + } } @@ -281,6 +295,14 @@ where Ok(()) } + /// Iterate over the slice and append each element. + fn append_elmts(&mut self, data: &[T]) -> io::Result<()> { + for x in data { + self.append_elmt(x)?; + } + Ok(()) + } + /// Append data to the file. Until the append-only file is synced, data is /// only written to memory.
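// Sketch of the offset bookkeeping behind the append implementation below
// (field widths are an assumption for illustration): every element gets a
// SizeEntry recording where its bytes start and how long they are, and a
// new entry always starts where the previous one ends.
struct SizeEntry {
    offset: u64,
    size: u16,
}

fn append(sizes: &mut Vec<SizeEntry>, data: &mut Vec<u8>, bytes: &[u8]) {
    let offset = sizes
        .last()
        .map(|e| e.offset + e.size as u64)
        .unwrap_or(0);
    sizes.push(SizeEntry {
        offset,
        size: bytes.len() as u16,
    });
    data.extend_from_slice(bytes);
}

fn main() {
    let (mut sizes, mut data) = (Vec::new(), Vec::new());
    append(&mut sizes, &mut data, b"abc");
    append(&mut sizes, &mut data, b"de");
    assert_eq!((sizes[1].offset, sizes[1].size), (3, 2));
    assert_eq!(data, b"abcde".to_vec());
}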
pub fn append(&mut self, bytes: &mut [u8]) -> io::Result<()> { @@ -289,7 +311,7 @@ where let offset = if next_pos == 0 { 0 } else { - let prev_entry = size_file.read_as_elmt(next_pos.saturating_sub(1))?; + let prev_entry = size_file.read_as_elmt(next_pos - 1)?; prev_entry.offset + prev_entry.size as u64 }; size_file.append_elmt(&SizeEntry { @@ -354,8 +376,7 @@ where if self.buffer_start_pos == 0 { file.set_len(0)?; } else { - let (offset, size) = - self.offset_and_size(self.buffer_start_pos.saturating_sub(1))?; + let (offset, size) = self.offset_and_size(self.buffer_start_pos - 1)?; file.set_len(offset + size as u64)?; }; } @@ -423,12 +444,14 @@ where fn read_as_elmt(&self, pos: u64) -> io::Result { let data = self.read(pos)?; - ser::deserialize(&mut &data[..], self.version).map_err(|e| { - io::Error::new( - io::ErrorKind::Other, - format!("Fail to deserialize data, {}", e), - ) - }) + ser::deserialize(&mut &data[..], self.version, DeserializationMode::default()).map_err( + |e| { + io::Error::new( + io::ErrorKind::Other, + format!("Fail to deserialize data, {}", e), + ) + }, + ) } // Read length bytes starting at offset from the buffer. @@ -474,40 +497,47 @@ where Ok(file) } + fn tmp_path(&self) -> PathBuf { + self.path.with_extension("tmp") + } + /// Saves a copy of the current file content, skipping data at the provided /// prune positions. prune_pos must be ordered. - pub fn save_prune(&mut self, prune_pos: &[u64]) -> io::Result<()> { - let tmp_path = self.path.with_extension("tmp"); - - // Scope the reader and writer to within the block so we can safely replace files later on. - { - let reader = File::open(&self.path)?; - let mut buf_reader = BufReader::new(reader); - let mut streaming_reader = StreamingReader::new(&mut buf_reader, self.version); - - let mut buf_writer = BufWriter::new(File::create(&tmp_path)?); - let mut bin_writer = BinWriter::new(&mut buf_writer, self.version); - - let mut current_pos = 0; - let mut prune_pos = prune_pos; - while let Ok(elmt) = T::read(&mut streaming_reader) { - if prune_pos.contains(¤t_pos) { - // Pruned pos, moving on. - prune_pos = &prune_pos[1..]; - } else { - // Not pruned, write to file. - elmt.write(&mut bin_writer).map_err(|e| { - io::Error::new(io::ErrorKind::Other, format!("Fail to write prune, {}", e)) - })?; - } - current_pos += 1; + pub fn write_tmp_pruned(&self, prune_pos: &[u64]) -> io::Result<()> { + let reader = File::open(&self.path)?; + let mut buf_reader = BufReader::new(reader); + let mut streaming_reader = StreamingReader::new(&mut buf_reader, self.version); + + let mut buf_writer = BufWriter::new(File::create(&self.tmp_path())?); + let mut bin_writer = BinWriter::new(&mut buf_writer, self.version); + + let mut current_pos = 0; + let mut prune_pos = prune_pos; + while let Ok(elmt) = T::read(&mut streaming_reader) { + if prune_pos.contains(¤t_pos) { + // Pruned pos, moving on. + prune_pos = &prune_pos[1..]; + } else { + // Not pruned, write to file. + elmt.write(&mut bin_writer).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("Fail to write at write_tmp_pruned, {}", e), + ) + })?; } - buf_writer.flush()?; + current_pos += 1; } + buf_writer.flush()?; + Ok(()) + } + /// Replace the underlying file with the file at tmp path. + /// Rebuild and initialize from the new file. 
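// Sketch of the two-phase pruning flow (write_tmp_pruned above plus
// replace_with_tmp below), modelled on a toy file of one-byte elements.
// Writing the compacted copy to "<path>.tmp" first and only then renaming
// it over the original keeps the data file intact if the process dies
// mid-compaction. Path and encoding here are illustrative only.
use std::fs;
use std::io;

fn write_tmp_pruned(path: &str, prune_idx: &[u64]) -> io::Result<()> {
    let bytes = fs::read(path)?;
    let kept: Vec<u8> = bytes
        .iter()
        .enumerate()
        .filter(|(i, _)| !prune_idx.contains(&(*i as u64)))
        .map(|(_, b)| *b)
        .collect();
    fs::write(format!("{}.tmp", path), kept)
}

fn replace_with_tmp(path: &str) -> io::Result<()> {
    fs::rename(format!("{}.tmp", path), path)
}

fn main() -> io::Result<()> {
    let path = "toy_data.bin";
    fs::write(path, [10u8, 11, 12, 13])?;
    write_tmp_pruned(path, &[1, 2])?; // drop the 0-based elements 1 and 2
    replace_with_tmp(path)?;
    assert_eq!(fs::read(path)?, vec![10u8, 13]);
    fs::remove_file(path)
}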
+ pub fn replace_with_tmp(&mut self) -> io::Result<()> { // Replace the underlying file - // pmmr_data.tmp -> pmmr_data.bin - self.replace(&tmp_path)?; + self.replace(&self.tmp_path())?; // Now rebuild our size file to reflect the pruned data file. // This will replace the underlying file internally. diff --git a/store/tests/lmdb.rs b/store/tests/lmdb.rs index cceb4c11ef..dc3d2329fd 100644 --- a/store/tests/lmdb.rs +++ b/store/tests/lmdb.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -65,9 +65,75 @@ fn setup(test_dir: &str) { clean_output_dir(test_dir); } +#[test] +fn test_exists() -> Result<(), store::Error> { + let test_dir = "target/test_exists"; + setup(test_dir); + + let store = store::Store::new(test_dir, Some("test1"), None, None)?; + + let key = [0, 0, 0, 1]; + let value = [1, 1, 1, 1]; + + // Start new batch and insert a new key/value entry. + let batch = store.batch()?; + batch.put(&key, &value)?; + + // Check we can see the new entry in uncommitted batch. + assert!(batch.exists(&key)?); + + // Check we cannot see the new entry yet outside of the uncommitted batch. + assert!(!store.exists(&key)?); + + batch.commit()?; + + // Check we can see the new entry after committing the batch. + assert!(store.exists(&key)?); + + clean_output_dir(test_dir); + Ok(()) +} + +#[test] +fn test_iter() -> Result<(), store::Error> { + let test_dir = "target/test_iter"; + setup(test_dir); + + let store = store::Store::new(test_dir, Some("test1"), None, None)?; + + let key = [0, 0, 0, 1]; + let value = [1, 1, 1, 1]; + + // Start new batch and insert a new key/value entry. + let batch = store.batch()?; + batch.put(&key, &value)?; + + // TODO - This is not currently possible (and we need to be aware of this). + // Currently our SerIterator is limited to using a ReadTransaction only. + // + // Check we can see the new entry via an iterator using the uncommitted batch. + // let mut iter: SerIterator> = batch.iter(&[0])?; + // assert_eq!(iter.next(), Some((key.to_vec(), value.to_vec()))); + // assert_eq!(iter.next(), None); + + // Check we can not yet see the new entry via an iterator outside the uncommitted batch. + let mut iter = store.iter(&[0], |_, v| Ok(v.to_vec()))?; + assert_eq!(iter.next(), None); + + batch.commit()?; + + // Check we can see the new entry via an iterator after committing the batch. + let mut iter = store.iter(&[0], |_, v| Ok(v.to_vec()))?; + assert_eq!(iter.next(), Some(value.to_vec())); + assert_eq!(iter.next(), None); + + clean_output_dir(test_dir); + Ok(()) +} + #[test] fn lmdb_allocate() -> Result<(), store::Error> { - let test_dir = "test_output/lmdb_allocate"; + let test_dir = "target/lmdb_allocate"; setup(test_dir); // Allocate more than the initial chunk, ensuring // the DB resizes underneath diff --git a/store/tests/pmmr.rs b/store/tests/pmmr.rs index 1c0b1b32b1..935b39a6b4 100644 --- a/store/tests/pmmr.rs +++ b/store/tests/pmmr.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -22,7 +22,7 @@ use chrono::prelude::Utc; use croaring::Bitmap; use crate::core::core::hash::DefaultHashable; -use crate::core::core::pmmr::{Backend, PMMR}; +use crate::core::core::pmmr::{Backend, ReadablePMMR, PMMR}; use crate::core::ser::{ Error, PMMRIndexHashable, PMMRable, ProtocolVersion, Readable, Reader, Writeable, Writer, }; @@ -46,7 +46,7 @@ fn pmmr_leaf_idx_iter() { // The first 5 leaves [0,1,2,3,4] are at pos [1,2,4,5,8] in the MMR. assert_eq!(leaf_idx, vec![0, 1, 2, 3, 4]); - assert_eq!(leaf_pos, vec![1, 2, 4, 5, 8]); + assert_eq!(leaf_pos, vec![0, 1, 3, 4, 7]); } } teardown(data_dir); @@ -73,12 +73,12 @@ fn pmmr_append() { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); assert_eq!(pmmr.n_unpruned_leaves(), 4); - assert_eq!(pmmr.get_data(1), Some(elems[0])); - assert_eq!(pmmr.get_data(2), Some(elems[1])); + assert_eq!(pmmr.get_data(0), Some(elems[0])); + assert_eq!(pmmr.get_data(1), Some(elems[1])); - assert_eq!(pmmr.get_hash(1), Some(pos_0)); - assert_eq!(pmmr.get_hash(2), Some(pos_1)); - assert_eq!(pmmr.get_hash(3), Some(pos_2)); + assert_eq!(pmmr.get_hash(0), Some(pos_0)); + assert_eq!(pmmr.get_hash(1), Some(pos_1)); + assert_eq!(pmmr.get_hash(2), Some(pos_2)); } // adding the rest and sync again @@ -111,22 +111,22 @@ fn pmmr_append() { assert_eq!(pmmr.n_unpruned_leaves(), 9); // First pair of leaves. - assert_eq!(pmmr.get_data(1), Some(elems[0])); - assert_eq!(pmmr.get_data(2), Some(elems[1])); + assert_eq!(pmmr.get_data(0), Some(elems[0])); + assert_eq!(pmmr.get_data(1), Some(elems[1])); // Second pair of leaves. - assert_eq!(pmmr.get_data(4), Some(elems[2])); - assert_eq!(pmmr.get_data(5), Some(elems[3])); + assert_eq!(pmmr.get_data(3), Some(elems[2])); + assert_eq!(pmmr.get_data(4), Some(elems[3])); // Third pair of leaves. 
- assert_eq!(pmmr.get_data(8), Some(elems[4])); - assert_eq!(pmmr.get_data(9), Some(elems[5])); - assert_eq!(pmmr.get_hash(10), Some(pos_9)); + assert_eq!(pmmr.get_data(7), Some(elems[4])); + assert_eq!(pmmr.get_data(8), Some(elems[5])); + assert_eq!(pmmr.get_hash(9), Some(pos_9)); } // check the resulting backend store and the computation of the root let node_hash = elems[0].hash_with_index(0); - assert_eq!(backend.get_hash(1).unwrap(), node_hash); + assert_eq!(backend.get_hash(0).unwrap(), node_hash); { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); @@ -154,22 +154,22 @@ fn pmmr_compact_leaf_sibling() { // pos 1 and 2 are leaves (and siblings) // the parent is pos 3 - let (pos_1_hash, pos_2_hash, pos_3_hash) = { + let (pos_0_hash, pos_1_hash, pos_2_hash) = { let pmmr = PMMR::at(&mut backend, mmr_size); ( + pmmr.get_hash(0).unwrap(), pmmr.get_hash(1).unwrap(), pmmr.get_hash(2).unwrap(), - pmmr.get_hash(3).unwrap(), ) }; // prune pos 1 { let mut pmmr = PMMR::at(&mut backend, mmr_size); - pmmr.prune(1).unwrap(); + pmmr.prune(0).unwrap(); // prune pos 8 as well to push the remove list past the cutoff - pmmr.prune(8).unwrap(); + pmmr.prune(7).unwrap(); } backend.sync().unwrap(); @@ -179,36 +179,36 @@ fn pmmr_compact_leaf_sibling() { assert_eq!(pmmr.n_unpruned_leaves(), 17); - // check that pos 1 is "removed" - assert_eq!(pmmr.get_hash(1), None); + // check that pos 0 is "removed" + assert_eq!(pmmr.get_hash(0), None); - // check that pos 2 and 3 are unchanged + // check that pos 1 and 2 are unchanged + assert_eq!(pmmr.get_hash(1).unwrap(), pos_1_hash); assert_eq!(pmmr.get_hash(2).unwrap(), pos_2_hash); - assert_eq!(pmmr.get_hash(3).unwrap(), pos_3_hash); } - // check we can still retrieve the "removed" element at pos 1 + // check we can still retrieve the "removed" element at pos 0 // from the backend hash file. - assert_eq!(backend.get_from_file(1).unwrap(), pos_1_hash); + assert_eq!(backend.get_from_file(0).unwrap(), pos_0_hash); // aggressively compact the PMMR files backend.check_compact(1, &Bitmap::new()).unwrap(); - // check pos 1, 2, 3 are in the state we expect after compacting + // check pos 0, 1, 2 are in the state we expect after compacting { let pmmr = PMMR::at(&mut backend, mmr_size); - // check that pos 1 is "removed" - assert_eq!(pmmr.get_hash(1), None); + // check that pos 0 is "removed" + assert_eq!(pmmr.get_hash(0), None); - // check that pos 2 and 3 are unchanged + // check that pos 1 and 2 are unchanged + assert_eq!(pmmr.get_hash(1).unwrap(), pos_1_hash); assert_eq!(pmmr.get_hash(2).unwrap(), pos_2_hash); - assert_eq!(pmmr.get_hash(3).unwrap(), pos_3_hash); } // Check we can still retrieve the "removed" hash at pos 1 from the hash file. // It should still be available even after pruning and compacting. 
- assert_eq!(backend.get_from_file(1).unwrap(), pos_1_hash); + assert_eq!(backend.get_from_file(0).unwrap(), pos_0_hash); } teardown(data_dir); @@ -235,9 +235,9 @@ fn pmmr_prune_compact() { // pruning some choice nodes { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); - pmmr.prune(1).unwrap(); + pmmr.prune(0).unwrap(); + pmmr.prune(3).unwrap(); pmmr.prune(4).unwrap(); - pmmr.prune(5).unwrap(); } backend.sync().unwrap(); @@ -246,9 +246,9 @@ fn pmmr_prune_compact() { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); assert_eq!(root, pmmr.root().unwrap()); // check we can still retrieve same element from leaf index 2 - assert_eq!(pmmr.get_data(2).unwrap(), TestElem(2)); + assert_eq!(pmmr.get_data(1).unwrap(), TestElem(2)); // and the same for leaf index 7 - assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7)); + assert_eq!(pmmr.get_data(10).unwrap(), TestElem(7)); } // compact @@ -258,8 +258,8 @@ fn pmmr_prune_compact() { { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); assert_eq!(root, pmmr.root().unwrap()); - assert_eq!(pmmr.get_data(2).unwrap(), TestElem(2)); - assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7)); + assert_eq!(pmmr.get_data(1).unwrap(), TestElem(2)); + assert_eq!(pmmr.get_data(10).unwrap(), TestElem(7)); } } @@ -279,9 +279,9 @@ fn pmmr_reload() { let mmr_size = load(0, &elems[..], &mut backend); // retrieve entries from the hash file for comparison later + let pos_2_hash = backend.get_hash(2).unwrap(); let pos_3_hash = backend.get_hash(3).unwrap(); let pos_4_hash = backend.get_hash(4).unwrap(); - let pos_5_hash = backend.get_hash(5).unwrap(); // save the root let root = { @@ -296,7 +296,7 @@ fn pmmr_reload() { // prune a node so we have prune data { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); - pmmr.prune(1).unwrap(); + pmmr.prune(0).unwrap(); } backend.sync().unwrap(); assert_eq!(backend.unpruned_size(), mmr_size); @@ -310,8 +310,8 @@ fn pmmr_reload() { // prune another node to force compact to actually do something { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); - pmmr.prune(4).unwrap(); - pmmr.prune(2).unwrap(); + pmmr.prune(3).unwrap(); + pmmr.prune(1).unwrap(); } backend.sync().unwrap(); assert_eq!(backend.unpruned_size(), mmr_size); @@ -324,13 +324,13 @@ fn pmmr_reload() { // prune some more to get rm log data { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); - pmmr.prune(5).unwrap(); + pmmr.prune(4).unwrap(); } backend.sync().unwrap(); assert_eq!(backend.unpruned_size(), mmr_size); } - // new a new backend referencing the data files + // create a new backend referencing the data files // and check everything still works as expected { let mut backend = @@ -342,29 +342,29 @@ fn pmmr_reload() { assert_eq!(root, pmmr.root().unwrap()); } - // pos 1 and pos 2 are both removed (via parent pos 3 in prune list) + // pos 0 and pos 1 are both removed (via parent pos 2 in prune list) + assert_eq!(backend.get_hash(0), None); assert_eq!(backend.get_hash(1), None); - assert_eq!(backend.get_hash(2), None); - // pos 3 is "removed" but we keep the hash around for root of pruned subtree - assert_eq!(backend.get_hash(3), Some(pos_3_hash)); + // pos 2 is "removed" but we keep the hash around for root of pruned subtree + assert_eq!(backend.get_hash(2), Some(pos_2_hash)); - // pos 4 is removed (via prune list) + // pos 3 is removed (via prune list) + assert_eq!(backend.get_hash(3), None); + // pos 4 is removed (via leaf_set) 
assert_eq!(backend.get_hash(4), None); - // pos 5 is removed (via leaf_set) - assert_eq!(backend.get_hash(5), None); // now check contents of the hash file - // pos 1 and pos 2 are no longer in the hash file + // pos 0 and pos 1 are no longer in the hash file + assert_eq!(backend.get_from_file(0), None); assert_eq!(backend.get_from_file(1), None); - assert_eq!(backend.get_from_file(2), None); - // pos 3 is still in there - assert_eq!(backend.get_from_file(3), Some(pos_3_hash)); + // pos 2 is still in there + assert_eq!(backend.get_from_file(2), Some(pos_2_hash)); - // pos 4 and pos 5 are also still in there + // pos 3 and pos 4 are also still in there + assert_eq!(backend.get_from_file(3), Some(pos_3_hash)); assert_eq!(backend.get_from_file(4), Some(pos_4_hash)); - assert_eq!(backend.get_from_file(5), Some(pos_5_hash)); } } @@ -406,10 +406,10 @@ fn pmmr_rewind() { // prune the first 4 elements (leaves at pos 1, 2, 4, 5) { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); + pmmr.prune(0).unwrap(); pmmr.prune(1).unwrap(); - pmmr.prune(2).unwrap(); + pmmr.prune(3).unwrap(); pmmr.prune(4).unwrap(); - pmmr.prune(5).unwrap(); } backend.sync().unwrap(); @@ -436,26 +436,26 @@ fn pmmr_rewind() { } // Also check the data file looks correct. - // pos 1, 2, 4, 5 are all leaves but these have been pruned. - for pos in vec![1, 2, 4, 5] { + // pos 0, 1, 3, 4 are all leaves but these have been pruned. + for pos in vec![0, 1, 3, 4] { assert_eq!(backend.get_data(pos), None); } - // pos 3, 6, 7 are non-leaves so we have no data for these - for pos in vec![3, 6, 7] { + // pos 2, 5, 6 are non-leaves so we have no data for these + for pos in vec![2, 5, 6] { assert_eq!(backend.get_data(pos), None); } - // pos 8 and 9 are both leaves and should be unaffected by prior pruning + // pos 7 and 8 are both leaves and should be unaffected by prior pruning - assert_eq!(backend.get_data(8), Some(elems[4])); - assert_eq!(backend.get_hash(8), Some(elems[4].hash_with_index(7))); + assert_eq!(backend.get_data(7), Some(elems[4])); + assert_eq!(backend.get_hash(7), Some(elems[4].hash_with_index(7))); - assert_eq!(backend.get_data(9), Some(elems[5])); - assert_eq!(backend.get_hash(9), Some(elems[5].hash_with_index(8))); + assert_eq!(backend.get_data(8), Some(elems[5])); + assert_eq!(backend.get_hash(8), Some(elems[5].hash_with_index(8))); // TODO - Why is this 2 here? 
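// A plausible answer to the TODO above, reasoned from this test's setup
// rather than confirmed by the original authors: at MMR size 10 the six
// leaves sit at pos0 [0, 1, 3, 4, 7, 8], and the four at pos0 0, 1, 3 and 4
// were pruned and compacted away, so the data file is left holding exactly
// the two leaves at pos0 7 and 8.
fn main() {
    let leaves = [0u64, 1, 3, 4, 7, 8];
    let pruned = [0u64, 1, 3, 4];
    let remaining = leaves.iter().copied().filter(|p| !pruned.contains(p)).count();
    assert_eq!(remaining, 2);
}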
println!("***** backend size here: {}", backend.data_size()); - // assert_eq!(backend.data_size(), 2); + assert_eq!(backend.data_size(), 2); { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, 10); @@ -470,9 +470,9 @@ fn pmmr_rewind() { } // also check the data file looks correct - // everything up to and including pos 7 should be pruned from the data file - // but we have rewound to pos 5 so everything after that should be None - for pos in 1..17 { + // everything up to and including pos 6 should be pruned from the data file + // but we have rewound to pos 4 so everything after that should be None + for pos in 0..16 { assert_eq!(backend.get_data(pos), None); } @@ -500,8 +500,8 @@ fn pmmr_compact_single_leaves() { { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); - pmmr.prune(1).unwrap(); - pmmr.prune(4).unwrap(); + pmmr.prune(0).unwrap(); + pmmr.prune(3).unwrap(); } backend.sync().unwrap(); @@ -511,8 +511,8 @@ fn pmmr_compact_single_leaves() { { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); - pmmr.prune(2).unwrap(); - pmmr.prune(5).unwrap(); + pmmr.prune(1).unwrap(); + pmmr.prune(4).unwrap(); } backend.sync().unwrap(); @@ -534,18 +534,18 @@ fn pmmr_compact_entire_peak() { let mmr_size = load(0, &elems[0..5], &mut backend); backend.sync().unwrap(); - let pos_7_hash = backend.get_hash(7).unwrap(); + let pos_6_hash = backend.get_hash(6).unwrap(); - let pos_8 = backend.get_data(8).unwrap(); - let pos_8_hash = backend.get_hash(8).unwrap(); + let pos_7 = backend.get_data(7).unwrap(); + let pos_7_hash = backend.get_hash(7).unwrap(); // prune all leaves under the peak at pos 7 { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); + pmmr.prune(0).unwrap(); pmmr.prune(1).unwrap(); - pmmr.prune(2).unwrap(); + pmmr.prune(3).unwrap(); pmmr.prune(4).unwrap(); - pmmr.prune(5).unwrap(); } backend.sync().unwrap(); @@ -555,13 +555,13 @@ fn pmmr_compact_entire_peak() { // now check we have pruned up to and including the peak at pos 7 // hash still available in underlying hash file - assert_eq!(backend.get_hash(7), Some(pos_7_hash)); - assert_eq!(backend.get_from_file(7), Some(pos_7_hash)); + assert_eq!(backend.get_hash(6), Some(pos_6_hash)); + assert_eq!(backend.get_from_file(6), Some(pos_6_hash)); // now check we still have subsequent hash and data where we expect - assert_eq!(backend.get_data(8), Some(pos_8)); - assert_eq!(backend.get_hash(8), Some(pos_8_hash)); - assert_eq!(backend.get_from_file(8), Some(pos_8_hash)); + assert_eq!(backend.get_data(7), Some(pos_7)); + assert_eq!(backend.get_hash(7), Some(pos_7_hash)); + assert_eq!(backend.get_from_file(7), Some(pos_7_hash)); } teardown(data_dir); @@ -571,17 +571,17 @@ fn pmmr_compact_entire_peak() { fn pmmr_compact_horizon() { let (data_dir, elems) = setup("compact_horizon"); { + let pos_0_hash; let pos_1_hash; let pos_2_hash; - let pos_3_hash; + let pos_5_hash; let pos_6_hash; - let pos_7_hash; - let pos_8; - let pos_8_hash; + let pos_7; + let pos_7_hash; - let pos_11; - let pos_11_hash; + let pos_10; + let pos_10_hash; let mmr_size; { @@ -596,46 +596,46 @@ fn pmmr_compact_horizon() { assert_eq!(backend.data_size(), 19); assert_eq!(backend.hash_size(), 35); + pos_0_hash = backend.get_hash(0).unwrap(); pos_1_hash = backend.get_hash(1).unwrap(); pos_2_hash = backend.get_hash(2).unwrap(); - pos_3_hash = backend.get_hash(3).unwrap(); + pos_5_hash = backend.get_hash(5).unwrap(); pos_6_hash = backend.get_hash(6).unwrap(); - pos_7_hash = backend.get_hash(7).unwrap(); - pos_8 = 
backend.get_data(8).unwrap(); - pos_8_hash = backend.get_hash(8).unwrap(); + pos_7 = backend.get_data(7).unwrap(); + pos_7_hash = backend.get_hash(7).unwrap(); - pos_11 = backend.get_data(11).unwrap(); - pos_11_hash = backend.get_hash(11).unwrap(); + pos_10 = backend.get_data(10).unwrap(); + pos_10_hash = backend.get_hash(10).unwrap(); // pruning some choice nodes { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); + pmmr.prune(3).unwrap(); pmmr.prune(4).unwrap(); - pmmr.prune(5).unwrap(); + pmmr.prune(0).unwrap(); pmmr.prune(1).unwrap(); - pmmr.prune(2).unwrap(); } backend.sync().unwrap(); // check we can read hashes and data correctly after pruning { // assert_eq!(backend.get_hash(3), None); - assert_eq!(backend.get_from_file(3), Some(pos_3_hash)); + assert_eq!(backend.get_from_file(2), Some(pos_2_hash)); // assert_eq!(backend.get_hash(6), None); - assert_eq!(backend.get_from_file(6), Some(pos_6_hash)); + assert_eq!(backend.get_from_file(5), Some(pos_5_hash)); // assert_eq!(backend.get_hash(7), None); - assert_eq!(backend.get_from_file(7), Some(pos_7_hash)); + assert_eq!(backend.get_from_file(6), Some(pos_6_hash)); - assert_eq!(backend.get_hash(8), Some(pos_8_hash)); - assert_eq!(backend.get_data(8), Some(pos_8)); - assert_eq!(backend.get_from_file(8), Some(pos_8_hash)); + assert_eq!(backend.get_hash(7), Some(pos_7_hash)); + assert_eq!(backend.get_data(7), Some(pos_7)); + assert_eq!(backend.get_from_file(7), Some(pos_7_hash)); - assert_eq!(backend.get_hash(11), Some(pos_11_hash)); - assert_eq!(backend.get_data(11), Some(pos_11)); - assert_eq!(backend.get_from_file(11), Some(pos_11_hash)); + assert_eq!(backend.get_hash(10), Some(pos_10_hash)); + assert_eq!(backend.get_data(10), Some(pos_10)); + assert_eq!(backend.get_from_file(10), Some(pos_10_hash)); } // compact @@ -644,28 +644,28 @@ fn pmmr_compact_horizon() { // check we can read a hash by pos correctly after compaction { + assert_eq!(backend.get_hash(0), None); + assert_eq!(backend.get_from_file(0), Some(pos_0_hash)); + assert_eq!(backend.get_hash(1), None); assert_eq!(backend.get_from_file(1), Some(pos_1_hash)); - assert_eq!(backend.get_hash(2), None); - assert_eq!(backend.get_from_file(2), Some(pos_2_hash)); - - assert_eq!(backend.get_hash(3), Some(pos_3_hash)); + assert_eq!(backend.get_hash(2), Some(pos_2_hash)); + assert_eq!(backend.get_hash(3), None); assert_eq!(backend.get_hash(4), None); - assert_eq!(backend.get_hash(5), None); - assert_eq!(backend.get_hash(6), Some(pos_6_hash)); + assert_eq!(backend.get_hash(5), Some(pos_5_hash)); - assert_eq!(backend.get_from_file(7), Some(pos_7_hash)); + assert_eq!(backend.get_from_file(6), Some(pos_6_hash)); - assert_eq!(backend.get_hash(8), Some(pos_8_hash)); - assert_eq!(backend.get_from_file(8), Some(pos_8_hash)); + assert_eq!(backend.get_hash(7), Some(pos_7_hash)); + assert_eq!(backend.get_from_file(7), Some(pos_7_hash)); } } // recheck stored data { - // renew backend + // recreate backend let backend = store::pmmr::PMMRBackend::::new( data_dir.to_string(), true, @@ -677,12 +677,12 @@ fn pmmr_compact_horizon() { assert_eq!(backend.data_size(), 19); assert_eq!(backend.hash_size(), 35); - // check we can read a hash by pos correctly from renewd backend + // check we can read a hash by pos correctly from recreated backend + assert_eq!(backend.get_hash(6), Some(pos_6_hash)); + assert_eq!(backend.get_from_file(6), Some(pos_6_hash)); + assert_eq!(backend.get_hash(7), Some(pos_7_hash)); assert_eq!(backend.get_from_file(7), Some(pos_7_hash)); - - 
assert_eq!(backend.get_hash(8), Some(pos_8_hash)); - assert_eq!(backend.get_from_file(8), Some(pos_8_hash)); } { @@ -697,8 +697,8 @@ fn pmmr_compact_horizon() { { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); + pmmr.prune(7).unwrap(); pmmr.prune(8).unwrap(); - pmmr.prune(9).unwrap(); } // compact some more @@ -707,7 +707,7 @@ fn pmmr_compact_horizon() { // recheck stored data { - // renew backend + // recreate backend let backend = store::pmmr::PMMRBackend::::new( data_dir.to_string(), true, @@ -721,15 +721,15 @@ fn pmmr_compact_horizon() { assert_eq!(backend.data_size(), 13); assert_eq!(backend.hash_size(), 27); - // check we can read a hash by pos correctly from renewd backend + // check we can read a hash by pos correctly from recreated backend // get_hash() and get_from_file() should return the same value - // and we only store leaves in the leaf_set so pos 7 still has a hash in there - assert_eq!(backend.get_hash(7), Some(pos_7_hash)); - assert_eq!(backend.get_from_file(7), Some(pos_7_hash)); + // and we only store leaves in the leaf_set so pos 6 still has a hash in there + assert_eq!(backend.get_hash(6), Some(pos_6_hash)); + assert_eq!(backend.get_from_file(6), Some(pos_6_hash)); - assert_eq!(backend.get_hash(11), Some(pos_11_hash)); - assert_eq!(backend.get_data(11), Some(pos_11)); - assert_eq!(backend.get_from_file(11), Some(pos_11_hash)); + assert_eq!(backend.get_hash(10), Some(pos_10_hash)); + assert_eq!(backend.get_data(10), Some(pos_10)); + assert_eq!(backend.get_from_file(10), Some(pos_10_hash)); } } @@ -758,9 +758,9 @@ fn compact_twice() { // pruning some choice nodes { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); + pmmr.prune(0).unwrap(); pmmr.prune(1).unwrap(); - pmmr.prune(2).unwrap(); - pmmr.prune(4).unwrap(); + pmmr.prune(3).unwrap(); } backend.sync().unwrap(); @@ -768,8 +768,8 @@ fn compact_twice() { { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); assert_eq!(root, pmmr.root().unwrap()); - assert_eq!(pmmr.get_data(5).unwrap(), TestElem(4)); - assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7)); + assert_eq!(pmmr.get_data(4).unwrap(), TestElem(4)); + assert_eq!(pmmr.get_data(10).unwrap(), TestElem(7)); } // compact @@ -779,16 +779,16 @@ fn compact_twice() { { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); assert_eq!(root, pmmr.root().unwrap()); - assert_eq!(pmmr.get_data(5).unwrap(), TestElem(4)); - assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7)); + assert_eq!(pmmr.get_data(4).unwrap(), TestElem(4)); + assert_eq!(pmmr.get_data(10).unwrap(), TestElem(7)); } // now prune some more nodes { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); - pmmr.prune(5).unwrap(); + pmmr.prune(4).unwrap(); + pmmr.prune(7).unwrap(); pmmr.prune(8).unwrap(); - pmmr.prune(9).unwrap(); } backend.sync().unwrap(); @@ -796,7 +796,7 @@ fn compact_twice() { { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); assert_eq!(root, pmmr.root().unwrap()); - assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7)); + assert_eq!(pmmr.get_data(10).unwrap(), TestElem(7)); } // compact @@ -806,7 +806,7 @@ fn compact_twice() { { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); assert_eq!(root, pmmr.root().unwrap()); - assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7)); + assert_eq!(pmmr.get_data(10).unwrap(), TestElem(7)); } } @@ -820,21 +820,21 @@ fn cleanup_rewind_files_test() { let prefix_to_save = "bar"; let seconds_to_delete_after = 100; - // new the 
scenario + // create the scenario let (data_dir, _) = setup("cleanup_rewind_files_test"); - // new some files with the delete prefix that aren't yet old enough to delete - new_numbered_files(&data_dir, expected, prefix_to_delete, 0, 0); - // new some files with the delete prefix that are old enough to delete - new_numbered_files( + // create some files with the delete prefix that aren't yet old enough to delete + create_numbered_files(&data_dir, expected, prefix_to_delete, 0, 0); + // create some files with the delete prefix that are old enough to delete + create_numbered_files( &data_dir, expected, prefix_to_delete, seconds_to_delete_after + 1, expected, ); - // new some files with the save prefix that are old enough to delete, but will be saved because they don't start + // create some files with the save prefix that are old enough to delete, but will be saved because they don't start // with the right prefix - new_numbered_files( + create_numbered_files( &data_dir, expected, prefix_to_save, @@ -885,13 +885,13 @@ fn cleanup_rewind_files_test() { teardown(data_dir); } -/// new some files for testing with, for example +/// Create some files for testing with, for example /// /// ```text -/// new_numbered_files(".", 3, "hello.txt.", 100, 2) +/// create_numbered_files(".", 3, "hello.txt.", 100, 2) /// ``` /// -/// will new files +/// will create files /// /// ```text /// hello.txt.2 @@ -901,7 +901,7 @@ fn cleanup_rewind_files_test() { /// /// in the current working directory that are all 100 seconds old (modified and accessed time) /// -fn new_numbered_files( +fn create_numbered_files( data_dir: &str, num_files: u32, prefix: &str, diff --git a/store/tests/prune_list.rs b/store/tests/prune_list.rs index c98410d30d..a781dfbf48 100644 --- a/store/tests/prune_list.rs +++ b/store/tests/prune_list.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,53 +15,38 @@ use grin_store as store; use crate::store::prune_list::PruneList; -use croaring::Bitmap; - -// Prune list is 1-indexed but we implement this internally with a bitmap that supports a 0 value. -// We need to make sure we safely handle 0 safely. -#[test] -fn test_zero_value() { - // new a bitmap with a 0 value in it. - let mut bitmap = Bitmap::new(); - bitmap.add(0); - - // Instantiate a prune list from our existing bitmap. - let pl = PruneList::new(None, bitmap); - - // Our prune list should be empty (0 filtered out during creation). 
- assert!(pl.is_empty()); -} #[test] fn test_is_pruned() { let mut pl = PruneList::empty(); assert_eq!(pl.len(), 0); + assert_eq!(pl.is_pruned(0), false); assert_eq!(pl.is_pruned(1), false); assert_eq!(pl.is_pruned(2), false); - assert_eq!(pl.is_pruned(3), false); - pl.add(2); + pl.append(1); pl.flush().unwrap(); - assert_eq!(pl.to_vec(), vec![2]); - assert_eq!(pl.is_pruned(1), false); - assert_eq!(pl.is_pruned(2), true); + assert_eq!(pl.iter().collect::>(), [2]); + assert_eq!(pl.is_pruned(0), false); + assert_eq!(pl.is_pruned(1), true); + assert_eq!(pl.is_pruned(2), false); assert_eq!(pl.is_pruned(3), false); - assert_eq!(pl.is_pruned(4), false); - pl.add(2); - pl.add(1); + let mut pl = PruneList::empty(); + pl.append(0); + pl.append(1); pl.flush().unwrap(); assert_eq!(pl.len(), 1); - assert_eq!(pl.to_vec(), [3]); + assert_eq!(pl.iter().collect::>(), [3]); + assert_eq!(pl.is_pruned(0), true); assert_eq!(pl.is_pruned(1), true); assert_eq!(pl.is_pruned(2), true); - assert_eq!(pl.is_pruned(3), true); - assert_eq!(pl.is_pruned(4), false); + assert_eq!(pl.is_pruned(3), false); - pl.add(4); + pl.append(3); // Flushing the prune_list removes any individual leaf positions. // This assumes we will track these outside the prune_list via the leaf_set. @@ -69,11 +54,11 @@ fn test_is_pruned() { assert_eq!(pl.len(), 2); assert_eq!(pl.to_vec(), [3, 4]); + assert_eq!(pl.is_pruned(0), true); assert_eq!(pl.is_pruned(1), true); assert_eq!(pl.is_pruned(2), true); assert_eq!(pl.is_pruned(3), true); - assert_eq!(pl.is_pruned(4), true); - assert_eq!(pl.is_pruned(5), false); + assert_eq!(pl.is_pruned(4), false); } #[test] @@ -82,189 +67,320 @@ fn test_get_leaf_shift() { // start with an empty prune list (nothing shifted) assert_eq!(pl.len(), 0); + assert_eq!(pl.get_leaf_shift(4), 0); assert_eq!(pl.get_leaf_shift(1), 0); assert_eq!(pl.get_leaf_shift(2), 0); assert_eq!(pl.get_leaf_shift(3), 0); - assert_eq!(pl.get_leaf_shift(4), 0); // now add a single leaf pos to the prune list // leaves will not shift shift anything // we only start shifting after pruning a parent - pl.add(1); + pl.append(0); pl.flush().unwrap(); - assert_eq!(pl.to_vec(), vec![1]); + assert_eq!(pl.iter().collect::>(), [1]); + assert_eq!(pl.get_leaf_shift(0), 0); assert_eq!(pl.get_leaf_shift(1), 0); assert_eq!(pl.get_leaf_shift(2), 0); assert_eq!(pl.get_leaf_shift(3), 0); - assert_eq!(pl.get_leaf_shift(4), 0); - // now add the sibling leaf pos (pos 1 and pos 2) which will prune the parent - // at pos 3 this in turn will "leaf shift" the leaf at pos 3 by 2 - pl.add(1); - pl.add(2); + // now add the sibling leaf pos (pos 1) which will prune the parent + // at pos 2 this in turn will "leaf shift" the leaf at pos 2 by 2 + pl.append(1); pl.flush().unwrap(); assert_eq!(pl.len(), 1); + assert_eq!(pl.get_leaf_shift(0), 0); assert_eq!(pl.get_leaf_shift(1), 0); - assert_eq!(pl.get_leaf_shift(2), 0); + assert_eq!(pl.get_leaf_shift(2), 2); assert_eq!(pl.get_leaf_shift(3), 2); assert_eq!(pl.get_leaf_shift(4), 2); - assert_eq!(pl.get_leaf_shift(5), 2); - // now prune an additional leaf at pos 4 + // now prune an additional leaf at pos 3 // leaf offset of subsequent pos will be 2 // 00100120 - pl.add(4); + pl.append(3); pl.flush().unwrap(); assert_eq!(pl.len(), 2); - assert_eq!(pl.to_vec(), [3, 4]); + assert_eq!(pl.iter().collect::>(), [3, 4]); + assert_eq!(pl.get_leaf_shift(0), 0); assert_eq!(pl.get_leaf_shift(1), 0); - assert_eq!(pl.get_leaf_shift(2), 0); + assert_eq!(pl.get_leaf_shift(2), 2); assert_eq!(pl.get_leaf_shift(3), 2); 
assert_eq!(pl.get_leaf_shift(4), 2); assert_eq!(pl.get_leaf_shift(5), 2); assert_eq!(pl.get_leaf_shift(6), 2); assert_eq!(pl.get_leaf_shift(7), 2); - assert_eq!(pl.get_leaf_shift(8), 2); - // now prune the sibling at pos 5 - // the two smaller subtrees (pos 3 and pos 6) are rolled up to larger subtree - // (pos 7) the leaf offset is now 4 to cover entire subtree containing first + // now prune the sibling at pos 4 + // the two smaller subtrees (pos 2 and pos 5) are rolled up to larger subtree + // (pos 6) the leaf offset is now 4 to cover entire subtree containing first // 4 leaves 00100120 - pl.add(4); - pl.add(5); + pl.append(4); pl.flush().unwrap(); assert_eq!(pl.len(), 1); - assert_eq!(pl.to_vec(), [7]); + assert_eq!(pl.iter().collect::>(), [7]); + assert_eq!(pl.get_leaf_shift(0), 0); assert_eq!(pl.get_leaf_shift(1), 0); assert_eq!(pl.get_leaf_shift(2), 0); assert_eq!(pl.get_leaf_shift(3), 0); assert_eq!(pl.get_leaf_shift(4), 0); assert_eq!(pl.get_leaf_shift(5), 0); - assert_eq!(pl.get_leaf_shift(6), 0); + assert_eq!(pl.get_leaf_shift(6), 4); assert_eq!(pl.get_leaf_shift(7), 4); assert_eq!(pl.get_leaf_shift(8), 4); - assert_eq!(pl.get_leaf_shift(9), 4); - // now check we can prune some unconnected nodes in arbitrary order + // now check we can prune some unconnected nodes // and that leaf_shift is correct for various pos let mut pl = PruneList::empty(); - pl.add(5); - pl.add(11); - pl.add(12); - pl.add(4); + pl.append(3); + pl.append(4); + pl.append(10); + pl.append(11); pl.flush().unwrap(); assert_eq!(pl.len(), 2); - assert_eq!(pl.to_vec(), [6, 13]); - assert_eq!(pl.get_leaf_shift(2), 0); - assert_eq!(pl.get_leaf_shift(4), 0); + assert_eq!(pl.iter().collect::>(), [6, 13]); + assert_eq!(pl.get_leaf_shift(1), 0); + assert_eq!(pl.get_leaf_shift(3), 0); + assert_eq!(pl.get_leaf_shift(7), 2); assert_eq!(pl.get_leaf_shift(8), 2); - assert_eq!(pl.get_leaf_shift(9), 2); + assert_eq!(pl.get_leaf_shift(12), 4); assert_eq!(pl.get_leaf_shift(13), 4); - assert_eq!(pl.get_leaf_shift(14), 4); } #[test] fn test_get_shift() { let mut pl = PruneList::empty(); assert!(pl.is_empty()); + assert_eq!(pl.get_shift(0), 0); assert_eq!(pl.get_shift(1), 0); assert_eq!(pl.get_shift(2), 0); - assert_eq!(pl.get_shift(3), 0); // prune a single leaf node // pruning only a leaf node does not shift any subsequent pos // we will only start shifting when a parent can be pruned - pl.add(1); + pl.append(0); pl.flush().unwrap(); - assert_eq!(pl.to_vec(), [1]); + assert_eq!(pl.iter().collect::>(), [1]); + assert_eq!(pl.get_shift(0), 0); assert_eq!(pl.get_shift(1), 0); assert_eq!(pl.get_shift(2), 0); - assert_eq!(pl.get_shift(3), 0); - pl.add(1); - pl.add(2); + pl.append(1); pl.flush().unwrap(); - assert_eq!(pl.to_vec(), [3]); + assert_eq!(pl.iter().collect::>(), [3]); + assert_eq!(pl.get_shift(0), 0); assert_eq!(pl.get_shift(1), 0); - assert_eq!(pl.get_shift(2), 0); + assert_eq!(pl.get_shift(2), 2); assert_eq!(pl.get_shift(3), 2); assert_eq!(pl.get_shift(4), 2); assert_eq!(pl.get_shift(5), 2); - assert_eq!(pl.get_shift(6), 2); - // pos 3 is not a leaf and is already in prune list - // prune it and check we are still consistent - pl.add(3); + pl.append(3); pl.flush().unwrap(); - assert_eq!(pl.to_vec(), [3]); + assert_eq!(pl.iter().collect::>(), [3, 4]); + assert_eq!(pl.get_shift(0), 0); assert_eq!(pl.get_shift(1), 0); - assert_eq!(pl.get_shift(2), 0); + assert_eq!(pl.get_shift(2), 2); assert_eq!(pl.get_shift(3), 2); assert_eq!(pl.get_shift(4), 2); assert_eq!(pl.get_shift(5), 2); - assert_eq!(pl.get_shift(6), 2); - pl.add(4); 
+
+#[test]
+pub fn test_iter() {
+	let mut pl = PruneList::empty();
+	pl.append(0);
+	pl.append(1);
+	pl.append(3);
+	assert_eq!(pl.iter().collect::<Vec<_>>(), [3, 4]);
+
+	let mut pl = PruneList::empty();
+	pl.append(0);
+	pl.append(1);
+	pl.append(4);
+	assert_eq!(pl.iter().collect::<Vec<_>>(), [3, 5]);
+}
+
+#[test]
+pub fn test_pruned_bintree_range_iter() {
+	let mut pl = PruneList::empty();
+	pl.append(0);
+	pl.append(1);
+	pl.append(3);
+	assert_eq!(
+		pl.pruned_bintree_range_iter().collect::<Vec<_>>(),
+		[1..4, 4..5]
+	);
+
+	let mut pl = PruneList::empty();
+	pl.append(0);
+	pl.append(1);
+	pl.append(4);
+	assert_eq!(
+		pl.pruned_bintree_range_iter().collect::<Vec<_>>(),
+		[1..4, 5..6]
+	);
+}
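The expected ranges can be read straight off the tree shape: a subtree root at 1-based position `pos` with height `h` has `2^(h+1) - 2` positions below it. A hypothetical helper (not the crate's implementation) that reproduces the ranges asserted above:

```rust
use std::ops::Range;

// Positions spanned by the pruned subtree rooted at 1-based `pos`, height `h`.
fn bintree_range(pos: u64, h: u64) -> Range<u64> {
	let below = (1u64 << (h + 1)) - 2; // nodes beneath the root
	(pos - below)..(pos + 1)
}

fn main() {
	assert_eq!(bintree_range(3, 1), 1..4); // root 3 covers leaves 1 and 2
	assert_eq!(bintree_range(4, 0), 4..5); // a bare leaf spans only itself
	assert_eq!(bintree_range(7, 2), 1..8); // a height-2 root covers 4 leaves
}
```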
+
+#[test]
+pub fn test_unpruned_iter() {
+	let pl = PruneList::empty();
+	assert_eq!(pl.unpruned_iter(5).collect::<Vec<_>>(), [1, 2, 3, 4, 5]);
+
+	let mut pl = PruneList::empty();
+	pl.append(1);
+	assert_eq!(pl.iter().collect::<Vec<_>>(), [2]);
+	assert_eq!(pl.pruned_bintree_range_iter().collect::<Vec<_>>(), [2..3]);
+	assert_eq!(pl.unpruned_iter(4).collect::<Vec<_>>(), [1, 3, 4]);
+
+	let mut pl = PruneList::empty();
+	pl.append(1);
+	pl.append(3);
+	pl.append(4);
+	assert_eq!(pl.iter().collect::<Vec<_>>(), [2, 6]);
+	assert_eq!(
+		pl.pruned_bintree_range_iter().collect::<Vec<_>>(),
+		[2..3, 4..7]
+	);
+	assert_eq!(pl.unpruned_iter(9).collect::<Vec<_>>(), [1, 3, 7, 8, 9]);
+}
+
+#[test]
+fn test_unpruned_leaf_iter() {
+	let pl = PruneList::empty();
+	assert_eq!(
+		pl.unpruned_leaf_iter(8).collect::<Vec<_>>(),
+		[1, 2, 4, 5, 8]
+	);
+
+	let mut pl = PruneList::empty();
+	pl.append(1);
+	assert_eq!(pl.iter().collect::<Vec<_>>(), [2]);
+	assert_eq!(pl.pruned_bintree_range_iter().collect::<Vec<_>>(), [2..3]);
+	assert_eq!(pl.unpruned_leaf_iter(5).collect::<Vec<_>>(), [1, 4, 5]);
+
+	let mut pl = PruneList::empty();
+	pl.append(1);
+	pl.append(3);
+	pl.append(4);
+	assert_eq!(pl.iter().collect::<Vec<_>>(), [2, 6]);
+	assert_eq!(
+		pl.pruned_bintree_range_iter().collect::<Vec<_>>(),
+		[2..3, 4..7]
+	);
+	assert_eq!(pl.unpruned_leaf_iter(9).collect::<Vec<_>>(), [1, 8, 9]);
+}
+
+#[test]
+pub fn test_append_pruned_subtree() {
+	let mut pl = PruneList::empty();
+
+	// append a pruned leaf pos (shift and leaf shift are unaffected).
+	pl.append(0);
+
+	assert_eq!(pl.to_vec(), [1]);
+	assert_eq!(pl.get_shift(1), 0);
+	assert_eq!(pl.get_leaf_shift(1), 0);
+
+	pl.append(2);
+
+	// subtree beneath root at 2 is pruned
+	// pos 3 is shifted by 2 pruned hashes [1, 2]
+	// pos 3 is shifted by 2 leaves [1, 2]
+	assert_eq!(pl.to_vec(), [3]);
+	assert_eq!(pl.get_shift(3), 2);
+	assert_eq!(pl.get_leaf_shift(3), 2);
+
+	// append another pruned subtree (ancestor of previous one)
+	pl.append(6);
+
+	// subtree beneath root at 6 is pruned
+	// pos 7 is shifted by 6 pruned hashes [1, 2, 3, 4, 5, 6]
+	// pos 7 is shifted by 4 leaves [1, 2, 4, 5]
+	assert_eq!(pl.to_vec(), [7]);
+	assert_eq!(pl.get_shift(7), 6);
+	assert_eq!(pl.get_leaf_shift(7), 4);
+
+	// now append another pruned leaf pos
+	pl.append(7);
+
+	// additional pruned leaf does not affect the shift or leaf shift
+	// pos 8 is shifted by 6 pruned hashes [1, 2, 3, 4, 5, 6]
+	// pos 8 is shifted by 4 leaves [1, 2, 4, 5]
+	assert_eq!(pl.to_vec(), [7, 8]);
+	assert_eq!(pl.get_shift(8), 6);
+	assert_eq!(pl.get_leaf_shift(8), 4);
+}
+
+#[test]
+fn test_recreate_prune_list() {
+	let mut pl = PruneList::empty();
+	pl.append(3);
+	pl.append(4);
+	pl.append(10);
+
+	let pl2 = PruneList::new(None, vec![4, 5, 11].into_iter().collect());
+
+	assert_eq!(pl.to_vec(), pl2.to_vec());
+	assert_eq!(pl.shift_cache(), pl2.shift_cache());
+	assert_eq!(pl.leaf_shift_cache(), pl2.leaf_shift_cache());
+
+	let pl3 = PruneList::new(None, vec![6, 11].into_iter().collect());
+
+	assert_eq!(pl.to_vec(), pl3.to_vec());
+	assert_eq!(pl.shift_cache(), pl3.shift_cache());
+	assert_eq!(pl.leaf_shift_cache(), pl3.leaf_shift_cache());
 }
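test_recreate_prune_list demonstrates that `PruneList::new` normalizes whatever bitmap it is given: `[4, 5, 11]` and `[6, 11]` are two descriptions of the same pruned set and converge on the same canonical roots. A minimal sketch, assuming the `PruneList`/`croaring` API used above:

```rust
use croaring::Bitmap;
use grin_store::prune_list::PruneList;

fn main() {
	// Positions 4 and 5 (1-based) are siblings, so they roll up to their
	// parent at position 6; both inputs normalize to roots [6, 11].
	let a = PruneList::new(None, vec![4u32, 5, 11].into_iter().collect::<Bitmap>());
	let b = PruneList::new(None, vec![6u32, 11].into_iter().collect::<Bitmap>());
	assert_eq!(a.to_vec(), b.to_vec());
}
```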
diff --git a/store/tests/segment.rs b/store/tests/segment.rs
new file mode 100644
index 0000000000..a24c1b8c5c
--- /dev/null
+++ b/store/tests/segment.rs
@@ -0,0 +1,426 @@
+// Copyright 2021 The Grin Developers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::core::core::hash::DefaultHashable;
+use crate::core::core::pmmr;
+use crate::core::core::pmmr::segment::{Segment, SegmentIdentifier};
+use crate::core::core::pmmr::{Backend, ReadablePMMR, ReadonlyPMMR, PMMR};
+use crate::core::ser::{
+	BinReader, BinWriter, DeserializationMode, Error, PMMRable, ProtocolVersion, Readable, Reader,
+	Writeable, Writer,
+};
+use crate::store::pmmr::PMMRBackend;
+use chrono::Utc;
+use croaring::Bitmap;
+use grin_core as core;
+use grin_store as store;
+use std::fs;
+use std::io::Cursor;
+
+#[test]
+fn prunable_mmr() {
+	let t = Utc::now();
+	let data_dir = format!(
+		"./target/tmp/{}.{}-prunable_mmr",
+		t.timestamp(),
+		t.timestamp_subsec_nanos()
+	);
+	fs::create_dir_all(&data_dir).unwrap();
+
+	let n_leaves = 64 + 8 + 4 + 2 + 1;
+	let mut ba = PMMRBackend::new(&data_dir, true, ProtocolVersion(1), None).unwrap();
+	let mut mmr = PMMR::new(&mut ba);
+	for i in 0..n_leaves {
+		mmr.push(&TestElem([i / 7, i / 5, i / 3, i])).unwrap();
+	}
+	let last_pos = mmr.unpruned_size();
+	let root = mmr.root().unwrap();
+
+	let mut bitmap = Bitmap::new();
+	bitmap.add_range(0..n_leaves);
+
+	let id = SegmentIdentifier { height: 3, idx: 1 };
+
+	// Validate a segment before any pruning
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+	assert_eq!(
+		segment.root(last_pos, Some(&bitmap)).unwrap().unwrap(),
+		mmr.get_hash(29).unwrap()
+	);
+	segment.validate(last_pos, Some(&bitmap), root).unwrap();
+
+	// Prune a few leaves
+	let mut mmr = PMMR::at(&mut ba, last_pos);
+	prune(&mut mmr, &mut bitmap, &[8, 9, 13]);
+	ba.sync().unwrap();
+	ba.check_compact(last_pos, &Bitmap::new()).unwrap();
+	ba.sync().unwrap();
+
+	// Validate
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+	assert_eq!(
+		segment.root(last_pos, Some(&bitmap)).unwrap().unwrap(),
+		mmr.get_hash(29).unwrap()
+	);
+	segment.validate(last_pos, Some(&bitmap), root).unwrap();
+
+	// Prune more
+	let mut mmr = PMMR::at(&mut ba, last_pos);
+	prune(&mut mmr, &mut bitmap, &[10, 11]);
+	ba.sync().unwrap();
+	ba.check_compact(last_pos, &Bitmap::new()).unwrap();
+	ba.sync().unwrap();
+
+	// Validate
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+	assert_eq!(
+		segment.root(last_pos, Some(&bitmap)).unwrap().unwrap(),
+		mmr.get_hash(29).unwrap()
+	);
+	segment.validate(last_pos, Some(&bitmap), root).unwrap();
+
+	// Prune all but 1
+	let mut mmr = PMMR::at(&mut ba, last_pos);
+	prune(&mut mmr, &mut bitmap, &[14, 15]);
+	ba.sync().unwrap();
+	ba.check_compact(last_pos, &Bitmap::new()).unwrap();
+	ba.sync().unwrap();
+
+	// Validate
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+	assert_eq!(
+		segment.root(last_pos, Some(&bitmap)).unwrap().unwrap(),
+		mmr.get_hash(29).unwrap()
+	);
+	segment.validate(last_pos, Some(&bitmap), root).unwrap();
+
+	// Prune all
+	let mut mmr = PMMR::at(&mut ba, last_pos);
+	prune(&mut mmr, &mut bitmap, &[12]);
+	ba.sync().unwrap();
+	ba.check_compact(last_pos, &Bitmap::new()).unwrap();
+	ba.sync().unwrap();
+
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	assert!(Segment::from_pmmr(id, &mmr, true).is_ok());
+
+	// Final segment is not full, test it before pruning
+	let id = SegmentIdentifier { height: 3, idx: 9 };
+
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+	segment.validate(last_pos, Some(&bitmap), root).unwrap();
+
+	// Prune second and third to last leaves (a full peak in the MMR)
+	let mut mmr = PMMR::at(&mut ba, last_pos);
+	prune(&mut mmr, &mut bitmap, &[76, 77]);
+	ba.sync().unwrap();
+	ba.check_compact(last_pos, &Bitmap::new()).unwrap();
+	ba.sync().unwrap();
+
+	// Validate
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+	segment.validate(last_pos, Some(&bitmap), root).unwrap();
+
+	// Prune final element
+	let mut mmr = PMMR::at(&mut ba, last_pos);
+	prune(&mut mmr, &mut bitmap, &[78]);
+	ba.sync().unwrap();
+	ba.check_compact(last_pos, &Bitmap::new()).unwrap();
+	ba.sync().unwrap();
+
+	// Validate
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+	segment.validate(last_pos, Some(&bitmap), root).unwrap();
+
+	std::mem::drop(ba);
+	fs::remove_dir_all(&data_dir).unwrap();
+}
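The test repeats a prune, sync, compact, sync cycle after every round of pruning. A hypothetical helper (not part of this diff) that factors the cycle out, using only the calls shown above:

```rust
// One round of pruning followed by the sync/compact/sync cycle the test
// performs after each prune() call. `prune` is the helper defined at the
// bottom of this test file.
fn prune_round<T: PMMRable>(
	ba: &mut PMMRBackend<T>,
	bitmap: &mut Bitmap,
	last_pos: u64,
	leaf_idxs: &[u64],
) {
	let mut mmr = PMMR::at(ba, last_pos);
	prune(&mut mmr, bitmap, leaf_idxs);
	ba.sync().unwrap();
	ba.check_compact(last_pos, &Bitmap::new()).unwrap();
	ba.sync().unwrap();
}
```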
+
+#[test]
+fn pruned_segment() {
+	let t = Utc::now();
+	let data_dir = format!(
+		"./target/tmp/{}.{}-pruned_segment",
+		t.timestamp(),
+		t.timestamp_subsec_nanos()
+	);
+	fs::create_dir_all(&data_dir).unwrap();
+
+	let n_leaves = 16;
+	let mut ba = PMMRBackend::new(&data_dir, true, ProtocolVersion(1), None).unwrap();
+	let mut mmr = PMMR::new(&mut ba);
+	for i in 0..n_leaves {
+		mmr.push(&TestElem([i / 7, i / 5, i / 3, i])).unwrap();
+	}
+	let last_pos = mmr.unpruned_size();
+	let root = mmr.root().unwrap();
+
+	let mut bitmap = Bitmap::new();
+	bitmap.add_range(0..n_leaves);
+
+	// Prune all leaves of segment 1
+	prune(&mut mmr, &mut bitmap, &[4, 5, 6, 7]);
+	ba.sync().unwrap();
+	ba.check_compact(last_pos, &Bitmap::new()).unwrap();
+	ba.sync().unwrap();
+
+	// Validate the empty segment 1
+	let id = SegmentIdentifier { height: 2, idx: 1 };
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+	assert_eq!(segment.leaf_iter().count(), 0);
+	assert_eq!(segment.hash_iter().count(), 1);
+	assert_eq!(
+		segment
+			.first_unpruned_parent(last_pos, Some(&bitmap))
+			.unwrap(),
+		(ba.get_hash(13).unwrap(), 14)
+	);
+	assert!(segment.root(last_pos, Some(&bitmap)).unwrap().is_none());
+	segment.validate(last_pos, Some(&bitmap), root).unwrap();
+
+	// Prune all leaves of segment 0
+	let mut mmr = PMMR::at(&mut ba, last_pos);
+	prune(&mut mmr, &mut bitmap, &[0, 1, 2, 3]);
+	ba.sync().unwrap();
+	ba.check_compact(last_pos, &Bitmap::new()).unwrap();
+	ba.sync().unwrap();
+
+	// Validate the empty segment 1 again
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+	assert_eq!(segment.leaf_iter().count(), 0);
+	assert_eq!(segment.hash_iter().count(), 1);
+	// Since both 7 and 14 are now pruned, the first unpruned hash will be at 15
+	assert_eq!(
+		segment
+			.first_unpruned_parent(last_pos, Some(&bitmap))
+			.unwrap(),
+		(ba.get_hash(14).unwrap(), 15)
+	);
+	assert!(segment.root(last_pos, Some(&bitmap)).unwrap().is_none());
+	segment.validate(last_pos, Some(&bitmap), root).unwrap();
+
+	// Prune all leaves of segment 2 & 3
+	let mut mmr = PMMR::at(&mut ba, last_pos);
+	prune(&mut mmr, &mut bitmap, &[8, 9, 10, 11, 12, 13, 14, 15]);
+	ba.sync().unwrap();
+	ba.check_compact(last_pos, &Bitmap::new()).unwrap();
+	ba.sync().unwrap();
+
+	// Validate the empty segment 1 again
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+	assert_eq!(segment.leaf_iter().count(), 0);
+	assert_eq!(segment.hash_iter().count(), 1);
+	// Since both 15 and 30 are now pruned, the first unpruned hash will be at 31: the mmr root
+	assert_eq!(
+		segment
+			.first_unpruned_parent(last_pos, Some(&bitmap))
+			.unwrap(),
+		(root, 31)
+	);
+	assert!(segment.root(last_pos, Some(&bitmap)).unwrap().is_none());
+	segment.validate(last_pos, Some(&bitmap), root).unwrap();
+
+	let n_leaves = n_leaves + 4 + 2 + 1;
+	let mut mmr = PMMR::at(&mut ba, last_pos);
+	for i in 16..n_leaves {
+		mmr.push(&TestElem([i / 7, i / 5, i / 3, i])).unwrap();
+	}
+	bitmap.add_range(16..n_leaves);
+	let last_pos = mmr.unpruned_size();
+	let root = mmr.root().unwrap();
+
+	// Prune all leaves of segment 4
+	// The root of this segment is a direct peak of the full MMR
+	prune(&mut mmr, &mut bitmap, &[16, 17, 18, 19]);
+	ba.sync().unwrap();
+	ba.check_compact(last_pos, &Bitmap::new()).unwrap();
+	ba.sync().unwrap();
+
+	// Validate segment 4
+	let id = SegmentIdentifier { height: 2, idx: 4 };
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+	assert_eq!(segment.leaf_iter().count(), 0);
+	assert_eq!(segment.hash_iter().count(), 1);
+	assert_eq!(
+		segment
+			.first_unpruned_parent(last_pos, Some(&bitmap))
+			.unwrap(),
+		(ba.get_hash(37).unwrap(), 38)
+	);
+	assert!(segment.root(last_pos, Some(&bitmap)).unwrap().is_none());
+	segment.validate(last_pos, Some(&bitmap), root).unwrap();
+
+	// Segment 5 has 2 peaks
+	let id = SegmentIdentifier { height: 2, idx: 5 };
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+	assert_eq!(segment.leaf_iter().count(), 3);
+	assert_eq!(
+		segment
+			.first_unpruned_parent(last_pos, Some(&bitmap))
+			.unwrap()
+			.1,
+		1 + segment.segment_pos_range(last_pos).1
+	);
+	assert!(segment.root(last_pos, Some(&bitmap)).unwrap().is_some());
+	segment.validate(last_pos, Some(&bitmap), root).unwrap();
+	let prev_segment = segment;
+
+	// Prune final leaf (a peak)
+	let mut mmr = PMMR::at(&mut ba, last_pos);
+	prune(&mut mmr, &mut bitmap, &[22]);
+	ba.sync().unwrap();
+	ba.check_compact(last_pos, &Bitmap::new()).unwrap();
+	ba.sync().unwrap();
+
+	// Segment 5 should be unchanged
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+	assert_eq!(segment, prev_segment);
+	segment.validate(last_pos, Some(&bitmap), root).unwrap();
+
+	// Prune other peak of segment 5
+	let mut mmr = PMMR::at(&mut ba, last_pos);
+	prune(&mut mmr, &mut bitmap, &[20, 21]);
+	ba.sync().unwrap();
+	ba.check_compact(last_pos, &Bitmap::new()).unwrap();
+	ba.sync().unwrap();
+
+	// Validate segment 5 again
+	let mmr = ReadonlyPMMR::at(&mut ba, last_pos);
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+	assert_eq!(segment.leaf_iter().count(), 1);
+	assert_eq!(segment.hash_iter().count(), 1);
+	assert_eq!(
+		segment
+			.first_unpruned_parent(last_pos, Some(&bitmap))
+			.unwrap()
+			.1,
+		1 + segment.segment_pos_range(last_pos).1
+	);
+	assert!(segment.root(last_pos, Some(&bitmap)).unwrap().is_some());
+	segment.validate(last_pos, Some(&bitmap), root).unwrap();
+
+	std::mem::drop(ba);
+	fs::remove_dir_all(&data_dir).unwrap();
+}
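Both tests repeat the same extract-and-verify pattern on a partially pruned backend. A sketch of the helper one might factor out, under the assumption that `Hash` is `grin_core::core::hash::Hash` and the remaining types come from the imports at the top of this file:

```rust
// Hypothetical helper (not in this diff): fetch a segment from a possibly
// pruned backend and prove it against the full MMR root.
fn check_segment<T, B>(ba: &B, last_pos: u64, root: Hash, bitmap: &Bitmap, id: SegmentIdentifier)
where
	T: PMMRable,
	B: Backend<T>,
{
	let mmr = ReadonlyPMMR::at(ba, last_pos);
	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
	segment.validate(last_pos, Some(bitmap), root).unwrap();
}
```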
+
+#[test]
+fn ser_round_trip() {
+	let t = Utc::now();
+	let data_dir = format!(
+		"./target/tmp/{}.{}-segment_ser_round_trip",
+		t.timestamp(),
+		t.timestamp_subsec_nanos()
+	);
+	fs::create_dir_all(&data_dir).unwrap();
+
+	let n_leaves = 32;
+	let mut ba = PMMRBackend::new(&data_dir, true, ProtocolVersion(1), None).unwrap();
+	let mut mmr = pmmr::PMMR::new(&mut ba);
+	for i in 0..n_leaves {
+		mmr.push(&TestElem([i / 7, i / 5, i / 3, i])).unwrap();
+	}
+	let mut bitmap = Bitmap::new();
+	bitmap.add_range(0..n_leaves);
+	let last_pos = mmr.unpruned_size();
+
+	prune(&mut mmr, &mut bitmap, &[0, 1]);
+	ba.sync().unwrap();
+	ba.check_compact(last_pos, &Bitmap::new()).unwrap();
+	ba.sync().unwrap();
+
+	let mmr = ReadonlyPMMR::at(&ba, last_pos);
+	let id = SegmentIdentifier { height: 3, idx: 0 };
+	let segment = Segment::from_pmmr(id, &mmr, true).unwrap();
+
+	let mut cursor = Cursor::new(Vec::<u8>::new());
+	let mut writer = BinWriter::new(&mut cursor, ProtocolVersion(1));
+	Writeable::write(&segment, &mut writer).unwrap();
+	assert_eq!(
+		cursor.position(),
+		(9) + (8 + 7 * (8 + 32)) + (8 + 6 * (8 + 16)) + (8 + 2 * 32)
+	);
+	cursor.set_position(0);
+
+	let mut reader = BinReader::new(
+		&mut cursor,
+		ProtocolVersion(1),
+		DeserializationMode::default(),
+	);
+	let segment2: Segment<TestElem> = Readable::read(&mut reader).unwrap();
+	assert_eq!(segment, segment2);
+
+	std::mem::drop(ba);
+	fs::remove_dir_all(&data_dir).unwrap();
+}
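The expected cursor position works out to 521 bytes. A plausible reading of the expression, assuming u64 counts and positions, 32-byte hashes, and the 16-byte `TestElem` reported by `elmt_size()`:

```rust
const ID: u64 = 9; // segment identifier: height (u8) + idx (u64)
const HASHES: u64 = 8 + 7 * (8 + 32); // count + 7 * (position + 32-byte hash)
const LEAVES: u64 = 8 + 6 * (8 + 16); // count + 6 * (position + 16-byte TestElem)
const PROOF: u64 = 8 + 2 * 32; // count + 2 proof hashes

fn main() {
	assert_eq!(ID + HASHES + LEAVES + PROOF, 521); // 9 + 288 + 152 + 72
}
```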
+
+fn prune<T, B>(mmr: &mut PMMR<T, B>, bitmap: &mut Bitmap, leaf_idxs: &[u64])
+where
+	T: PMMRable,
+	B: Backend<T>,
+{
+	for &leaf_idx in leaf_idxs {
+		mmr.prune(pmmr::insertion_to_pmmr_index(leaf_idx)).unwrap();
+		bitmap.remove(leaf_idx as u32);
+	}
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub struct TestElem(pub [u32; 4]);
+
+impl DefaultHashable for TestElem {}
+
+impl PMMRable for TestElem {
+	type E = Self;
+
+	fn as_elmt(&self) -> Self::E {
+		*self
+	}
+
+	fn elmt_size() -> Option<u16> {
+		Some(16)
+	}
+}
+
+impl Writeable for TestElem {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
+		writer.write_u32(self.0[0])?;
+		writer.write_u32(self.0[1])?;
+		writer.write_u32(self.0[2])?;
+		writer.write_u32(self.0[3])
+	}
+}
+
+impl Readable for TestElem {
+	fn read<R: Reader>(reader: &mut R) -> Result<TestElem, Error> {
+		Ok(TestElem([
+			reader.read_u32()?,
+			reader.read_u32()?,
+			reader.read_u32()?,
+			reader.read_u32()?,
+		]))
+	}
+}
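The `prune` helper above converts 0-based leaf insertion indices into MMR positions via `pmmr::insertion_to_pmmr_index`. For reference, a hypothetical standalone version of that mapping (shown for illustration only, not the crate's implementation): a leaf's MMR position is twice its insertion index minus the number of one bits in that index.

```rust
// 0-based leaves and positions, matching the convention used in this diff.
fn leaf_pos(leaf_idx: u64) -> u64 {
	2 * leaf_idx - u64::from(leaf_idx.count_ones())
}

fn main() {
	assert_eq!(leaf_pos(0), 0); // first leaf
	assert_eq!(leaf_pos(1), 1); // second leaf; their parent sits at position 2
	assert_eq!(leaf_pos(2), 3);
	assert_eq!(leaf_pos(7), 11);
}
```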
bitmap cardinality: {}", @@ -146,9 +146,9 @@ fn bench_fast_or() { ); let bitmaps = init_bitmaps(); - let start = Utc::now().timestamp_nanos(); + let start = Utc::now().timestamp_nanos_opt().unwrap(); let bitmap = Bitmap::fast_or_heap(&bitmaps.iter().map(|x| x).collect::>()); - let fin = Utc::now().timestamp_nanos(); + let fin = Utc::now().timestamp_nanos_opt().unwrap(); let dur_ms = (fin - start) as f64 * nano_to_millis; println!( "fast_or_heap(): {:9.3?}ms. bitmap cardinality: {}", diff --git a/store/tests/utxo_set_perf.rs b/store/tests/utxo_set_perf.rs index 0e0e664124..a1d89b2f99 100644 --- a/store/tests/utxo_set_perf.rs +++ b/store/tests/utxo_set_perf.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -39,7 +39,7 @@ fn test_leaf_set_performance() { let now = Instant::now(); for x in 0..1_000 { for y in 0..1_000 { - let pos = (x * 1_000) + y + 1; + let pos = (x * 1_000) + y; leaf_set.add(pos); } leaf_set.flush().unwrap(); @@ -53,7 +53,7 @@ fn test_leaf_set_performance() { // Simulate looking up existence of a large number of pos in the leaf_set. let now = Instant::now(); for x in 0..1_000_000 { - assert!(leaf_set.includes(x + 1)); + assert!(leaf_set.includes(x)); } println!( "Checking 1,000,000 inclusions in leaf_set took {}ms", @@ -65,7 +65,7 @@ fn test_leaf_set_performance() { let now = Instant::now(); for x in 0..1_000 { for y in 0..1_000 { - let pos = (x * 1_000) + y + 1; + let pos = (x * 1_000) + y; leaf_set.remove(pos); } leaf_set.flush().unwrap(); diff --git a/util/Cargo.toml b/util/Cargo.toml index 8e93dd5acb..766019da4f 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grin_util" -version = "4.4.2" +version = "5.3.2" authors = ["Grin Developers "] description = "Simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format." license = "Apache-2.0" @@ -23,9 +23,9 @@ serde_derive = "1" log4rs = { version = "0.12", features = ["rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"] } log = "0.4" walkdir = "2" -zip = { version = "0.5", default-features = false } +zip = { version = "0.5.11", default-features = false } parking_lot = "0.10" zeroize = { version = "1.1", features =["zeroize_derive"] } -failure = "0.1" -failure_derive = "0.1" -grin_secp256k1zkp = { git = "https://github.com/mwcproject/rust-secp256k1-zkp", tag = "0.7.13", features = ["bullet-proof-sizing"] } \ No newline at end of file +thiserror = "1" + +grin_secp256k1zkp = { git = "https://github.com/mwcproject/rust-secp256k1-zkp", tag = "0.7.14", features = ["bullet-proof-sizing"] } \ No newline at end of file diff --git a/util/src/file.rs b/util/src/file.rs index 2777c0a7d7..36bb0d77ba 100644 --- a/util/src/file.rs +++ b/util/src/file.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/util/src/hex.rs b/util/src/hex.rs index 0382a9beaf..e8a681dde0 100644 --- a/util/src/hex.rs +++ b/util/src/hex.rs @@ -1,4 +1,4 @@ -// Copyright 2020 The Grin Developers +// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/util/Cargo.toml b/util/Cargo.toml
index 8e93dd5acb..766019da4f 100644
--- a/util/Cargo.toml
+++ b/util/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "grin_util"
-version = "4.4.2"
+version = "5.3.2"
 authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
 description = "Simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
 license = "Apache-2.0"
@@ -23,9 +23,9 @@ serde_derive = "1"
 log4rs = { version = "0.12", features = ["rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"] }
 log = "0.4"
 walkdir = "2"
-zip = { version = "0.5", default-features = false }
+zip = { version = "0.5.11", default-features = false }
 parking_lot = "0.10"
 zeroize = { version = "1.1", features =["zeroize_derive"] }
-failure = "0.1"
-failure_derive = "0.1"
-grin_secp256k1zkp = { git = "https://github.com/mwcproject/rust-secp256k1-zkp", tag = "0.7.13", features = ["bullet-proof-sizing"] }
\ No newline at end of file
+thiserror = "1"
+
+grin_secp256k1zkp = { git = "https://github.com/mwcproject/rust-secp256k1-zkp", tag = "0.7.14", features = ["bullet-proof-sizing"] }
\ No newline at end of file
diff --git a/util/src/file.rs b/util/src/file.rs
index 2777c0a7d7..36bb0d77ba 100644
--- a/util/src/file.rs
+++ b/util/src/file.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 The Grin Developers
+// Copyright 2021 The Grin Developers
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/util/src/hex.rs b/util/src/hex.rs
index 0382a9beaf..e8a681dde0 100644
--- a/util/src/hex.rs
+++ b/util/src/hex.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 The Grin Developers
+// Copyright 2021 The Grin Developers
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/util/src/lib.rs b/util/src/lib.rs
index 29d1753697..95026ca2cc 100644
--- a/util/src/lib.rs
+++ b/util/src/lib.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 The Grin Developers
+// Copyright 2021 The Grin Developers
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -33,8 +33,7 @@ pub use ov3::OnionV3Address;
 pub use ov3::OnionV3Error as OnionV3AddressError;

 // Re-export so only has to be included once
-pub use parking_lot::Mutex;
-pub use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
+pub use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};

 // Re-export so only has to be included once
 pub use secp256k1zkp as secp;
@@ -92,8 +91,15 @@ where
 	/// Initializes the OneTime, should only be called once after construction.
 	/// Will panic (via assert) if called more than once.
 	pub fn init(&self, value: T) {
+		self.set(value, false);
+	}
+
+	/// Allows the one time to be set again with an override.
+	pub fn set(&self, value: T, is_override: bool) {
 		let mut inner = self.inner.write();
-		assert!(inner.is_none());
+		if !is_override {
+			assert!(inner.is_none());
+		}
 		*inner = Some(value);
 	}
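The new `set(value, is_override)` keeps `init`'s one-shot guarantee while allowing a deliberate overwrite. A usage sketch, assuming `OneTime`'s existing `new`/`borrow` accessors from this module:

```rust
use grin_util::OneTime;

fn main() {
	let v: OneTime<u32> = OneTime::new();
	v.init(1); // first initialization: allowed
	// A second v.init(2) would panic; an intentional overwrite must say so.
	v.set(2, true);
	assert_eq!(v.borrow(), 2);
}
```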
diff --git a/util/src/logger.rs b/util/src/logger.rs
index be724065c6..63a442f774 100644
--- a/util/src/logger.rs
+++ b/util/src/logger.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 The Grin Developers
+// Copyright 2021 The Grin Developers
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -19,7 +19,6 @@ use backtrace::Backtrace;
 use std::{panic, thread};

 use log::{Level, Record};
-use log4rs;
 use log4rs::append::console::ConsoleAppender;
 use log4rs::append::file::FileAppender;
 use log4rs::append::rolling_file::{
diff --git a/util/src/macros.rs b/util/src/macros.rs
index 2213bdbe4e..12400bd9f0 100644
--- a/util/src/macros.rs
+++ b/util/src/macros.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 The Grin Developers
+// Copyright 2021 The Grin Developers
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/util/src/ov3.rs b/util/src/ov3.rs
index 8e16153762..3e6556507f 100644
--- a/util/src/ov3.rs
+++ b/util/src/ov3.rs
@@ -16,20 +16,19 @@ use crate::hex::from_hex;
 use data_encoding::BASE32;
 use ed25519_dalek::PublicKey as DalekPublicKey;
 use ed25519_dalek::SecretKey as DalekSecretKey;
-use failure::Fail;
 use sha3::{Digest, Sha3_256};
 use std::convert::TryFrom;
 use std::fmt;

 #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
 /// OnionV3 Address Errors
-#[derive(Fail)]
+#[derive(thiserror::Error)]
 pub enum OnionV3Error {
 	/// Error decoding an address from a string
-	#[fail(display = "Unable to decode Onion address from a string, {}", _0)]
+	#[error("Unable to decode Onion address from a string, {0}")]
 	AddressDecoding(String),
 	/// Error with given private key
-	#[fail(display = "Invalid private key, {}", _0)]
+	#[error("Invalid private key, {0}")]
 	InvalidPrivateKey(String),
 }
diff --git a/util/src/rate_counter.rs b/util/src/rate_counter.rs
index aa84ced963..1883739537 100644
--- a/util/src/rate_counter.rs
+++ b/util/src/rate_counter.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 The Grin Developers
+// Copyright 2021 The Grin Developers
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -99,6 +99,14 @@ impl RateCounter {
 			.filter(|x| !x.is_quiet())
 			.count() as u64
 	}
+
+	/// Elapsed time in ms since the last entry.
+	/// We use this to rate limit when sending.
+	pub fn elapsed_since_last_msg(&self) -> Option<u64> {
+		self.last_min_entries
+			.last()
+			.map(|x| millis_since_epoch().saturating_sub(x.timestamp))
+	}
 }

 // turns out getting the millisecs since epoch in Rust isn't as easy as it
diff --git a/util/src/secp_static.rs b/util/src/secp_static.rs
index f9c2b53e3d..9dc268eb92 100644
--- a/util/src/secp_static.rs
+++ b/util/src/secp_static.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 The Grin Developers
+// Copyright 2021 The Grin Developers
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/util/src/types.rs b/util/src/types.rs
index 364167c581..3bc9eb764e 100644
--- a/util/src/types.rs
+++ b/util/src/types.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 The Grin Developers
+// Copyright 2021 The Grin Developers
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/util/src/zip.rs b/util/src/zip.rs
index 5979021d92..470400484f 100644
--- a/util/src/zip.rs
+++ b/util/src/zip.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 The Grin Developers
+// Copyright 2021 The Grin Developers
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -59,7 +59,7 @@ pub fn extract_files(from_archive: File, dest: &Path, files: Vec<PathBuf>) -> io
 	let mut archive = zip_rs::ZipArchive::new(from_archive).expect("archive file exists");
 	for x in files {
 		if let Ok(file) = archive.by_name(x.to_str().expect("valid path")) {
-			let path = dest.join(file.name());
+			let path = dest.join(file.mangled_name());
 			let parent_dir = path.parent().expect("valid parent dir");
 			fs::create_dir_all(&parent_dir).expect("create parent dir");
 			let outfile = fs::File::create(&path).expect("file created");
diff --git a/util/tests/file.rs b/util/tests/file.rs
index 66f35c37eb..b31b6da134 100644
--- a/util/tests/file.rs
+++ b/util/tests/file.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 The Grin Developers
+// Copyright 2021 The Grin Developers
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/util/tests/zip.rs b/util/tests/zip.rs
index deda2204d7..1a7379c02a 100644
--- a/util/tests/zip.rs
+++ b/util/tests/zip.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 The Grin Developers
+// Copyright 2021 The Grin Developers
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
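One change above deserves a highlight: switching `file.name()` to `file.mangled_name()` sanitizes archive entry paths before they are joined onto `dest`, closing the classic zip-slip traversal; the `zip` bump to 0.5.11 in util/Cargo.toml is presumably what makes `mangled_name()` available. A sketch of the difference, assuming the zip 0.5 API:

```rust
use std::path::{Path, PathBuf};

// A malicious entry named "../../evil.sh" would escape `dest` if joined
// verbatim; mangled_name() strips the traversal components first.
fn join_sanitized(dest: &Path, file: &zip::read::ZipFile) -> PathBuf {
	// file.name()         -> "../../evil.sh"  (raw, attacker-controlled)
	// file.mangled_name() -> "evil.sh"        (parent-dir components removed)
	dest.join(file.mangled_name())
}
```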